networking-sfc-10.0.0/0000775000175000017500000000000013656750461014567 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/0000775000175000017500000000000013656750461015334 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/requirements.txt0000664000175000017500000000025013656750333020613 0ustar zuulzuul00000000000000sphinx>=2.0.0,!=2.1.0 # BSD openstackdocstheme>=2.0.0 # Apache-2.0 sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD reno>=2.5.0 # Apache-2.0 os-api-ref>=1.5.0 # Apache-2.0 networking-sfc-10.0.0/doc/source/0000775000175000017500000000000013656750461016634 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/source/_static/0000775000175000017500000000000013656750461020262 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/source/_static/.placeholder0000664000175000017500000000000013656750333022531 0ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/source/conf.py0000775000175000017500000000730613656750333020142 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
extensions = [ 'openstackdocstheme', 'oslo_config.sphinxext', 'oslo_config.sphinxconfiggen', 'oslo_policy.sphinxext', 'oslo_policy.sphinxpolicygen', 'sphinxcontrib.rsvgconverter', ] # openstackdocstheme options repository_name = 'openstack/networking-sfc' bug_project = 'networking-sfc' bug_tag = '' # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'networking-sfc' copyright = u'2013, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' html_static_path = ['_static'] html_theme = 'openstackdocs' # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual], torctree_only). latex_documents = [ ('index', 'doc-%s.tex' % project, u'Networking SFC Documentation', u'OpenStack Foundation', 'manual', # Specify toctree_only=True for a better document structure of the # generated PDF file. Note that this means the contents of the top # page will be ignored. 
True), ] latex_elements = { 'preamble': r'\setcounter{tocdepth}{2}', 'extraclassoptions': 'openany,oneside', } # -- Options for oslo_config.sphinxconfiggen --------------------------------- _config_generator_config_files = [ 'networking-sfc.conf', ] def _get_config_generator_config_definition(conf): config_file_path = '../../etc/oslo-config-generator/%s' % conf # oslo_config.sphinxconfiggen appends '.conf.sample' to the filename, # strip file extentension (.conf or .ini). output_file_path = '_static/config_samples/%s' % conf.rsplit('.', 1)[0] return (config_file_path, output_file_path) config_generator_config_file = [ _get_config_generator_config_definition(conf) for conf in _config_generator_config_files ] # -- Options for oslo_policy.sphinxpolicygen --------------------------------- policy_generator_config_file = '../../etc/oslo-policy-generator/policy.conf' sample_policy_basename = '_static/networking-sfc' networking-sfc-10.0.0/doc/source/readme.rst0000664000175000017500000000003613656750333020620 0ustar zuulzuul00000000000000.. include:: ../../README.rst networking-sfc-10.0.0/doc/source/install/0000775000175000017500000000000013656750461020302 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/source/install/configuration.rst0000664000175000017500000000436613656750333023712 0ustar zuulzuul00000000000000.. Copyright 2015 Futurewei. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) Configuration ============= Controller nodes ---------------- After installing the package, enable the service plugins in neutron-server by adding them in ``neutron.conf`` (typically found in ``/etc/neutron/``):: [DEFAULT] service_plugins = flow_classifier,sfc In the same configuration file, specify the driver to use in the plugins. Here we use the OVS driver:: [sfc] drivers = ovs [flowclassifier] drivers = ovs After that, restart the neutron-server. In devstack, run:: systemctl restart devstack@q-svc In a similar way with systemd setups, you can run:: systemctl restart neutron-server Compute nodes ------------- After installing the package, enable the networking-sfc extension in the Open vSwitch agent. The configuration file name can change, the default one is ``/etc/neutron/plugins/ml2/ml2_conf.ini``. Add the sfc extension:: [agent] extensions = sfc And restart the neutron-openvswitch-agent process. In devstack, run:: systemctl restart devstack@q-agt And with systemd setups you can run:: systemctl restart neutron-openvswitch-agent Database setup -------------- The database is the standard Neutron database with a few more tables, which can be configured with ``neutron-db-manage`` command-line tool: .. code-block:: console neutron-db-manage --subproject networking-sfc upgrade head networking-sfc-10.0.0/doc/source/install/install.rst0000664000175000017500000000350613656750333022504 0ustar zuulzuul00000000000000.. Copyright 2015 Futurewei. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) Installation ============ If possible, you should rely on packages provided by your Linux and/or OpenStack distribution: * For Fedora or CentOS, you can install the ``python-networking-sfc`` RPM package provided by the RDO project. If you use ``pip``, follow these steps to install networking-sfc: * `identify the version of the networking-sfc package `_ that matches your OpenStack version: * Ocata: latest 4.0.x version * Newton: latest 3.0.x version * Mitaka: latest 2.0.x version * indicate pip to (a) install precisely this version and (b) take into account OpenStack upper constraints on package versions for dependencies (example for Ocata): .. code-block:: console pip install -c https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt?h=stable/ocata networking-sfc==4.0.0 networking-sfc-10.0.0/doc/source/install/index.rst0000664000175000017500000000175613656750333022152 0ustar zuulzuul00000000000000.. Copyright 2015 Futurewei. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) Install Guide ============= .. toctree:: :maxdepth: 2 install configuration networking-sfc-10.0.0/doc/source/user/0000775000175000017500000000000013656750461017612 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/source/user/usage.rst0000664000175000017500000000200113656750333021437 0ustar zuulzuul00000000000000.. Copyright 2015 Futurewei. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) ===== Usage ===== To use networking-sfc in a project: .. code-block:: python import networking_sfc networking-sfc-10.0.0/doc/source/user/index.rst0000664000175000017500000000207713656750333021457 0ustar zuulzuul00000000000000.. Copyright 2015 Futurewei. 
All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) =================================== Using the Service Function Chaining =================================== .. toctree:: :maxdepth: 2 usage command_extensions networking-sfc-10.0.0/doc/source/user/command_extensions.rst0000664000175000017500000000535413656750333024246 0ustar zuulzuul00000000000000.. Copyright 2015 Futurewei. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) 
================= Command extension ================= Networking-sfc uses python-neutronclient's existing command extension framework for adding required command lines for realizing service function chaining functionality. Refer to `Python-neutronclient command extension `_ for further details. List of New Neutron CLI Commands: --------------------------------- Below listed command lines are introduced for realizing service function chaining. .. code-block:: none flow-classifier-create Create a flow-classifier. flow-classifier-delete Delete a given flow-classifier. flow-classifier-list List flow-classifiers that belong to a given tenant. flow-classifier-show Show information of a given flow-classifier. flow-classifier-update Update flow-classifier information. port-pair-create Create a port-pair. port-pair-delete Delete a given port-pair. port-pair-list List port-pairs that belongs to a given tenant. port-pair-show Show information of a given port-pair. port-pair-update Update port-pair's information. port-pair-group-create Create a port-pair-group. port-pair-group-delete Delete a given port-pair-group. port-pair-group-list List port-pair-groups that belongs to a given tenant. port-pair-group-show Show information of a given port-pair-group. port-pair-group-update Update port-pair-group's information. port-chain-create Create a port-chain. port-chain-delete Delete a given port-chain. port-chain-list List port-chains that belong to a given tenant. port-chain-show Show information of a given port-chain. port-chain-update Update port-chain's information. networking-sfc-10.0.0/doc/source/index.rst0000664000175000017500000000343613656750333020501 0ustar zuulzuul00000000000000.. Copyright 2015 Futurewei. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) .. the main title comes from README.rst .. NOTE(amotoki): The content of this file is NOT rendered in the generated PDF file. This is because toctree_only=False is specified in latex_documents in doc/source/conf.py to get a better structure of the PDF doc. .. NOTE(amotoki): The following "include" and hidden "toctree" directives are the magic to make both HTML and PDF versions of the document properly. The latex builder recognizes the doc structure based on "toctree" directive, while we would like to show the content of README file in the top page of the HTML version. .. include:: readme.rst .. toctree:: :hidden: readme Contents -------- .. toctree:: :maxdepth: 2 install/index user/index configuration/index .. toctree:: :maxdepth: 3 contributor/index .. only:: html .. rubric:: Indices and tables * :ref:`genindex` * :ref:`search` networking-sfc-10.0.0/doc/source/contributor/0000775000175000017500000000000013656750461021206 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/source/contributor/sfc_proxy_port_correlation.rst0000664000175000017500000002007613656750333027424 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. 
http://creativecommons.org/licenses/by/3.0/legalcode =============================================================== Exclusive Port-Pair Group for Non-Transparent Service Functions =============================================================== URL of the launchpad blueprint: https://blueprints.launchpad.net/networking-sfc/+spec/sfc-proxy-port-correlation This specification describes the support for non-transparent Service Functions in SFC Port Chains using a SFC Port Pair Group that is used exclusively by one Port Chain. Non-transparent Service Functions modify the N-tuple header fields of a packet. Problem Description =================== Most legacy Service Functions (SF) do not support SFC encapsulation, such as NSH, and therefore require an SFC Proxy to re-classify a packet that is returned from the egress port of the SF. The SFC Proxy uses the N-tuple values of a packet header to re-classify a packet. The packet N-tuple consists of the following: * Source IP address * Destination IP address * Source TCP/UDP port * Destination TCP/UDP port * IP Protocol However, if the SF is non-transparent (it modifies a part of the N-tuple of a packet), then re-classification cannot be done correctly. See https://datatracker.ietf.org/doc/draft-song-sfc-legacy-sf-mapping/ In addition the SF may dynamically change the mapping of the N-tuple values as the SF operations progress. A mechanism that uses a static N-tuple mapping to adjust for N-tuple changes cannot be employed. Proposed Changes ================ This is an enhancement to the SFC proxy so that it can handle the dynamic changes to N-tuple translation rules of the SF. A solution to the non-transparent SF is to use a SF VM that has multiple instances and assign the port-pairs for each SF instance to a separate Port Chain. This can be done by adding these ports to a SFC Proxy Port Pair Group which operates as a Port Pair Correlation Map instead of a normal Load Distribution function. 
The Proxy Port Pair Group is configured with multiple Port Pairs that are attached to the SF Instances of a specific non-transparent SF type, such as a Firewall SF. This Port Pair Group is configured to operate as a Port Pair Correlation Map. Each non-transparent SF instance is attached to a single Port Pair. These SF instances may either run on a VM or on a container within a VM. If an SF instance runs within a container, the container sub-port ([1][2]) is used as the ingress and/or egress port of the Port Pair. Each Port Chain is mapped to one of these port-pairs. Packets for a Port Chain arriving at the OVS Integration bridge are steered to the ingress port of the Port Pair assigned to that Port Chain. Packets received back from the SF on its egress port are then mapped back to the corresponding Port Chain. This mechanism avoids the need for the SFC Proxy to re-classify packets returned from the egress port of the non-transparent SF. For example, in the figure below, packets on Port Chain A are steered to Port Pair 1 and sent to the ingress port of SF Instance 1. Packets from the egress port of SF Instance 1 are then mapped back to Port Chain A and are delivered to the next hop in the chain. When a Port Chain is created (or updated) that uses a SFC Proxy PPG, the Port Chain is assigned to one of the Port Pairs in the PPG and the Port Pair is reserved for that Port Chain. If the Port Chain is deleted or the PPG is removed from the Port Chain, its Port Pair becomes available for use by another Port Chain. The Port Pairs in the SFC Proxy Port Pair Group may be hosted on different Compute Nodes as shown in the diagram below. If a Port Chain is created that uses a SFC Proxy Port Pair Group and all the Pairs in that PPG are in use by other Port Chains, an error 'Maximum number of Port Chains reached' is returned. This obviously requires that multiple instances of the non-transparent SF be deployed in either VMs or containers. 
The number of SF instances that must be deployed and configured as Port Pairs depends on the maximum number of Port Chains that are expected to use that particular SF. However, deploying multiple instances of a SF is easily done in modern data centers. A Port Chain may include multiple SFC Proxy PPGs, each one for a different type of non-transparent SF. For example PPG1 may be a group of non-transparent Firewall SF instances and PPG2 may be a group of non-transparent HTTP Optimizer SF instances. :: Compute Node 1 +------------------------------------------------------------+ | | | OVS Integration Bridge Non-transparent SF | | +--------------------------+ +.........................+ | | | SFC Proxy Port Pair | . . | | | Correlation Map PPG | . VM/Container1 . | | | +.....................+ | . pp1+------------------+ . | | | .Port Chain A <-> pp1 .--------->| Non-transparent | . | | | . .<---------| SF Instance 1 | . | | | . . | . +------------------+ . | | | . . | . VM/Container2 . | | | . . | . pp2+------------------+ . | | | .Port Chain C <-> pp2 .--------->| Non-transparent | . | | | . .<---------| SF Instance 2 | . | | | . . | . +------------------+ . | | +-.---------------------.--+ . . | +----.---------------------.-----.-------------------------.-+ . Compute Node 2 . . . +----.---------------------.-----.-------------------------.-+ | . . . . | | .OVS Integration Bridge . . | | +-.---------------------.--+ . . | | | . . | . VM/Container3 . | | | . . | . pp3+------------------+ . | | | .Port chain X <-> pp3 .--------->| Non-transparent | . | | | . .<---------| SF Instance 3 | . | | | +.....................+ | . +------------------+ . | | | | +.........................+ | | +--------------------------+ | +------------------------------------------------------------+ Alternatives ------------ An alternative mechanism for non-transparent SFs is to mark PPG as exclusive so that it is assigned to one port chain only. 
This would require a PPG be created for each port chain. The advantage to this approach is that the PPG can be used for load balancing. Data model impact ----------------- Add a "proxy-correlation-map" attribute to the Port Pair Group. This is a Boolean that will enable the Proxy Port Correlation. Add an "exclusive" attribute to the Port Pair Group. This is a Boolean that will enable exclusive use of a Port Pair Group by one Port Chain. REST API impact --------------- Add "proxy-correlation-map": true to the Port Pair Group. Add "exclusive": true to the Port Pair Group. Security impact --------------- None Notifications impact -------------------- None Other end user impact --------------------- None Performance Impact ------------------ None Other deployer impact --------------------- None. Developer impact ---------------- None. Implementation ============== Assignee(s) ----------- * Cathy Zhang (cathy.h.zhang@huawei.com) * Louis Fourie (louis.fourie@huawei.com) Work Items ---------- 1. Extend API port-pair-group-parameter to support "proxy-correlation-map" and the "exclusive" attributes. 2. Extend networking-sfc OVS driver to support "proxy-correlation-map" and "exclusive" attributes. 3. Add unit and functional tests. 4. Update documentation. Dependencies ============ None Testing ======= Unit tests and functional tests will be added. Documentation Impact ==================== None References ========== [1] Neutron Trunk-port https://wiki.openstack.org/wiki/Neutron/TrunkPort [2] VLAN aware VMs https://review.openstack.org/#/c/243786/11/specs/mitaka/vlan-aware-vms.rst networking-sfc-10.0.0/doc/source/contributor/system_design_and_workflow.rst0000664000175000017500000002604613656750333027377 0ustar zuulzuul00000000000000.. Copyright 2015 Futurewei. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) ========================== System Design and Workflow ========================== Problem Description =================== The `Service Chaining API specification `_ proposes a Neutron port based solution for setting up a service chain. A specification on the system architecture and related API work flow is needed to guide the code design. System Architecture =================== The following figure shows the generic architecture of the Port Chain Plugin. As shown in the diagram, Port Chain Plugin can be backed by different service providers such as OVS Driver and/or different types of SDN Controller Drivers. Through the "Common Driver API", these different drivers can provide different implementations for the service chain path rendering. In the first release and deployment based on this release, we will only deliver codes for the OVS driver. 
In the next release, we can add codes to support multiple active drivers:: Port Chain Plugin With Different Types of Drivers +-----------------------------------------------------------------+ | +-----------------------------------------------------------+ | | | Port Chain API | | | +-----------------------------------------------------------+ | | | Port Chain Database | | | +-----------------------------------------------------------+ | | | Driver Manager | | | +-----------------------------------------------------------+ | | | Common Driver API | | | +-----------------------------------------------------------+ | | | | | +------------+------------------------+---------------------+ | | | OVS Driver | Controller Driver1 | Controller Driver2 | | | +------------+------------------------+---------------------+ | +-------|------------------|-------------------------|------------+ | | | +-----------+ +-----------------+ +-----------------+ | OVS Agent | | SDN Controller1 | | SDN Controller2 | +-----------+ +-----------------+ +-----------------+ The second figure below shows the reference implementation architecture, which is through the OVS Driver path. The figure shows the components that will be added on the Neutron Server and the compute nodes to support this Neutron Based SFC functionality. As shown in the diagram, a new Port Chain Plugin will be added to the Neutron Server. The existing "OVS Driver" and "OVS Agent" will be extended to support the service chain functionality. The OVS Driver will communicate with each OVS Agent to program its OVS forwarding table properly so that a tenant's traffic flow can be steered through the user defined sequence of Neutron ports to get the desired service treatment from the Service Function running on the VMs. 
A separate `OVS Driver and Agent specification `_ will describe in more detail on the design consideration of the Driver, Agent, and how to set up the classification rules on the OVS to identify different flows and how to set up the OVS forwarding table. In the reference implementation, the OVS Driver communicates with OVS Agent through RPC to program the OVS. The communication between the OVS Agent and the OVS is through OVSDB/Openflow:: Port Chain Plugin With OVS Driver +-------------------------------+ | +-------------------------+ | | | Port Chain API | | | +-------------------------+ | | | Port Chain Database | | | +-------------------------+ | | | Driver Manager | | | +-------------------------+ | | | Common Driver API | | | +-------------------------+ | | | | | +-------------------------+ | | | OVS Driver | | | +-------------------------+ | +-------|----------------|------+ | | +-----------+ +-----------+ | OVS Agent | | OVS Agent | +-----------+ +-----------+ Port Chain Creation Workflow ============================ The following example shows how the Neutron CLI commands may be used to create a port-chain consisting of a service VM vm1 and a service VM vm2. The user can be an Admin/Tenant or an Application built on top. Traffic flow into the Port Chain will be from source IP address 22.1.20.1 TCP port 23 to destination IP address 171.4.5.6 TCP port 100. The flow needs to be treated by SF1 running on VM1 identified by Neutron port pair [p1, p2], SF2 running on VM2 identified by Neutron port pair [p3, p4], and SF3 running on VM3 identified by Neutron port pair [p5, p6]. The net1 should be created before creating Neutron port using existing Neutron API. The design has no restriction on the type of net1, i.e. it can be any type of Neutron network since SFC traffic will be tunneled transparently through the type of communication channels of net1. 
If the transport between vSwitches is VXLAN, then we will use that VXLAN tunnel (and NOT create another new tunnel) to transport the SFC traffic through. If the transport between vSwitches is Ethernet, then the SFC traffic will be transported through Ethernet. In other words, the SFC traffic will be carried over existing transport channel between vSwitches and the external transport channel between vSwitches is set up for net1 through existing Neutron API and ML2. The built-in OVS backend implements tunneling the original flow packets over VXLAN tunnel. The detailed outer VXLAN tunnel transport format and inner SFC flow format including how to leverage existing OVS's support for MPLS label to carry chain ID will be described in the `Port Chain OVS Driver and Agent specification `_. In the future we can add implementation of tunneling the SFC flow packets over flat L2 Ethernet or L3 IP network or GRE tunnel etc. Boot service VMs and attach ports --------------------------------- Create Neutron ports on network net1:: openstack port create --network net1 p1 openstack port create --network net1 p2 openstack port create --network net1 p3 openstack port create --network net1 p4 openstack port create --network net1 p5 openstack port create --network net1 p6 Boot VM1 from Nova with ports p1 and p2 using two --nic options:: openstack server create --image xxx --nic port-id=p1-id --nic port-id=p2-id vm1 --flavor Boot VM2 from Nova with ports p3 and p4 using two --nic options:: openstack server create --image yyy --nic port-id=p3-id --nic port-id=p4-id vm2 --flavor Boot VM3 from Nova with ports p5 and p6 using two --nic options:: openstack server create --image zzz --nic port-id=p5-id --nic port-id=p6-id vm3 --flavor Alternatively, the user can create each VM with one VNIC and then attach another Neutron port to the VM:: openstack server create --image xxx --nic port-id=p1-id vm1 openstack server add port vm1 p2-id openstack server create --image yyy --nic port-id=p3-id vm2 
openstack server add port vm2 p4-id openstack server create --image zzz --nic port-id=p5-id vm3 openstack server add port vm3 p6-id Once the Neutron ports p1 - p6 exist, the Port Chain is created using the steps described below. Create Flow Classifier ---------------------- Create flow-classifier FC1 that matches on source IP address 22.1.20.1 (ingress direction) and destination IP address 171.4.5.6 (egress direction) with TCP connection, source port 23 and destination port 100:: openstack sfc flow classifier create \ --ethertype IPv4 \ --source-ip-prefix 22.1.20.1/32 \ --destination-ip-prefix 172.4.5.6/32 \ --protocol tcp \ --source-port 23:23 \ --destination-port 100:100 FC1 .. note:: When using the (default) OVS driver, the ``--logical-source-port`` parameter is also required Create Port Pair ---------------- Create port-pair PP1 with ports p1 and p2, port-pair PP2 with ports p3 and p4, port-pair PP3 with ports P5 and P6:: openstack sfc port pair create \ --ingress=p1 \ --egress=p2 PP1 openstack sfc port pair create \ --ingress=p3 \ --egress=p4 PP2 openstack sfc port pair create \ --ingress=p5 \ --egress=p6 PP3 Create Port Group ----------------- Create port-pair-group PG1 with port-pair PP1 and PP2, and port-pair-group PG2 with port-pair PP3:: openstack sfc port pair group create \ --port-pair PP1 --port-pair PP2 PG1 openstack sfc port pair group create \ --port-pair PP3 PG2 Create Port Chain ----------------- Create port-chain PC1 with port-group PG1 and PG2, and flow classifier FC1:: openstack sfc port chain create \ --port-pair-group PG1 --port-pair-group PG2 --flow-classifier FC1 PC1 This will result in the Port chain driver being invoked to create the Port Chain. 
The following diagram illustrates the code execution flow (not the exact codes) for the port chain creation:: PortChainAPIParsingAndValidation: create_port_chain | V PortChainPlugin: create_port_chain | V PortChainDbPlugin: create_port_chain | V DriverManager: create_port_chain | V portchain.drivers.OVSDriver: create_port_chain The vSwitch Driver needs to figure out which switch VM1 is connecting with and which switch VM2 is connecting with (for OVS case, the OVS driver has that information given the VMs' port info). As to the connection setup between the two vSwitches, it should be done through existing ML2 plugin mechanism. The connection between these two vSwitches should already be set up before the user initiates the SFC request. The service chain flow packets will be tunneled through the connecting type/technology (e.g. VXLAN or GRE) between the two vSwitches. For our reference code implementation, we will use VXLAN to show a complete data path setup. Please refer to the `OVS Driver and OVS Agent specification `_ for more detail info. networking-sfc-10.0.0/doc/source/contributor/sfc_ovn_driver.rst0000664000175000017500000003032313656750333024747 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode =========================== Networking-sfc / OVN Driver =========================== https://blueprints.launchpad.net/networking-sfc/+spec/networking-sfc-ovn-driver This specification describes a networking-sfc driver that will interface with a new Logical Port Chain resource API for the `OVN `_ infrastructure. The driver will translate networking-sfc requests into Logical Port Chain resources in the OVN northbound DB. These Logical Port Chain resources are created in OVN by updating the appropriate tables in the OVN northbound database (an ovsdb database). Problem Description =================== networking-sfc allows various drivers to be used. 
Currently, drivers exist for OVS, ONOS and ODL infrastructures. Service chaining is being added to OVN and a driver is required to interface between networking-sfc and the OVN infrastructure. Proposed Changes ================ The proposed extensions to the OVN northbound DB schema and API are described briefly here. Refer to openvswitch documentation for details. In addition the new OVN driver for networking-sfc will map from networking-sfc requests to Logical Port Chain resources in the OVN northbound DB via the networking-ovn driver. The OVN driver for networking-sfc is shown below. :: +-------------------------------------------------------+ | +-----------------------+ +----------------------+ | | | Port Chain API | | Neutron API | | | +-----------------------+ +----------------------+ | | | Driver Manager | | ML2 Manager | | | +-----------------------+ +----------------------+ | | | Common Driver API | | ML2 Driver API | | | +-----------------------+ +----------------------+ | | | | | | v v | | +=======================+ +----------------------+ | | | networking-sfc / |->| networking-ovn | | | | OVN Driver | | ML2 Driver | | | +=======================+ +----------------------+ | | | Neutron Server| +-----------------------------------|-------------------+ | +-----------------------------------|-------------------+ | v | | +-----------------------+ | | | OVN Northbound DB | | | +-----------------------+ OVS Server | +-------------------------------------------------------+ OVN Northbound Port Chain DB ============================ The proposed OVN northbound DB extensions for Logical Port Chains are shown below with three new resources: - Logical Port Chain - Logical Port Pair Group - Logical Port Pair :: action=sfc port-pair- +---------+ +=========+ groups +===========+ | | | Logical | | Logical | | ACL |------>| Port |-------->| Port Pair | | |1 1| Chain |1 *| Group | +---------+ +=========+ +===========+ ^* port-pairs |1 | | acls |1 v* +---------+ports 
+---------+1 1 +===========+ | Logical |------>| Logical |<--------| Logical | | Switch |1 *| Switch | inport/ | Port Pair | | | | Port | outport | | +---------+ +---------+ +===========+ The OVN ACL actions are extended to include a SFC action with an external_id to reference the name of the Logical Port Chain (lchain) with which the ACL is associated. The sfc action means that the packet is allowed and steered into the port-chain. Logical Port Chain ------------------ A Logical Port Chain can contain one or more Logical Port Pair Groups. The order of Logical Port Pair Groups in the Logical Port Chain specifies the order of steering packets through the Port Chain from the outport of a Logical Port Pair in one Logical Port Pair Group to the inport of a Logical Port Pair in the next Logical Port Pair Group. Logical Port Pair Group ----------------------- A Logical Port Pair Group can contain one or more Logical Port Pairs and is used to load balance traffic across the Service Functions (Logical Port Pairs) in the Logical Port Pair Group. A Logical Port Pair Group can be a member of multiple Logical Port Chains. Logical Port Pair ----------------- A Logical Port Pair represents the ingress Logical Switch Port and the egress Logical Switch Port of a Service Function. A Logical Port Pair can be a member of only one Logical Port Pair Group. An OVN Logical Switch Port can be a member of only one Logical Port Pair. ACL --- The existing OVN ACL action will be extended to add a sfc action with an external_id to reference the name of the Logical Port Chain with which the ACL is associated. Networking-sfc / OVN Driver =========================== The networking-sfc / OVN driver maps the Port Chain commands to OVN ovn-nbctl commands. Port-chain to lport-chain Mapping --------------------------------- A Port-chain is mapped to a single lport-chain. 
Port-pair-group to lport-pair-group Mapping ------------------------------------------- A Port-pair-group is mapped to a single lport-pair-group. Port-pair to lport-pair Mapping ------------------------------- A Port-pair is mapped to a single lport-pair. Flow-classifier to OVN ACL Mapping ---------------------------------- Flow-classifiers will be mapped to OVN ACLs as follows. A flow-classifier is mapped to a single OVN ACL. When a flow-classifier is created, its OVN ACL is not created at that time. The OVN ACL is only created when the flow-classifier is associated with the port-chain: Then the driver does: acl-add lswitch direction priority match sfc [lchain=] When a port-chain is updated to add/remove flow-classifiers then the necessary OVN ACLs are created and deleted. If a port-chain that has flow-classifiers associated with it is deleted, then the OVN ACLs associated with those flow-classifiers are deleted. Function Mapping ---------------- +------------------------+----------------------+----------------------------+ | Port Chain Function | OVN Command | Description | +========================+======================+============================+ | create_port_chain | lchain-add, acl-add |Use acl-add when a | | | |port-chain is created | | | |with flow-classifiers | +------------------------+----------------------+----------------------------+ | delete_port_chain | lchain-del, acl-del |Use acl-del to delete all | | | |flow-classifiers associated | | | |with a port-chain | +------------------------+----------------------+----------------------------+ | update_port_chain | lchain-set-port- |Use this OVN command when | | | pair-group |PPGs are added to or | | | |removed from a port-chain | +------------------------+----------------------+----------------------------+ | " | acl-add, acl-del |Use acl-add/del when | | | |flow-classifiers are added | | | |or removed to a port-chain | +------------------------+----------------------+----------------------------+ | 
create_port_pair_group | lport-pair-group-add | | +------------------------+----------------------+----------------------------+ | delete_port_pair_group | lport-pair-group-del | | +------------------------+----------------------+----------------------------+ | update_port_pair_group | lport-pair-group- |Use this command to add / | | | set-port-pair |port-pairs to a PPG | +------------------------+----------------------+----------------------------+ | create_port_pair | lport-pair-add | | +------------------------+----------------------+----------------------------+ | delete_port_pair | lport-pair-del | | +------------------------+----------------------+----------------------------+ | create_flow_classifier | No action |OVN ACLs are only created | | | |when flow-classifiers are | | | |attached to a port-chain | +------------------------+----------------------+----------------------------+ | delete_flow_classifier | No action | " | +------------------------+----------------------+----------------------------+ Flow-Classifier Mapping ----------------------- +--------------------------------+-------------------------------------------+ | Flow Classifier | OVN ACL Field | +================================+===========================================+ | protocol | ip.protocol | +--------------------------------+-------------------------------------------+ | ethertype | eth.type | +--------------------------------+-------------------------------------------+ | source_port_range_min/max | If protocol = "tcp": min < tcp.src < max, | | | if protocol = "udp": min < udp.src < max | +--------------------------------+-------------------------------------------+ | destination_port_range_min/max | If protocol = "tcp": min < tcp.dst < max, | | | if protocol = "udp": min < udp.dst < max | +--------------------------------+-------------------------------------------+ | src_ip_prefix | If ethertype = "IPv4": ip4.src/mask, | | | if ethertype = "IPv6": ip6.src/mask | 
+--------------------------------+-------------------------------------------+ | destination_ip_prefix | If ethertype = "IPv4": ip4.dst/mask, | | | if ethertype = "IPv6" ip6.dst/mask | +--------------------------------+-------------------------------------------+ | logical_source_port | If the logical-source-port is specified in| | | the classifier then OVN ACL inport= | | | "logical_source_port.id" and OVN ACL | | | direction=from-port | +--------------------------------+-------------------------------------------+ | logical_destination_port | A single asymmetric port chain will use | | | only the logical-source-port, and not the | | | logical-destination-port | +--------------------------------+-------------------------------------------+ A symmetric port chain is defined with a classifier that must have both a logical-source-port and a logical-destination-port. In this case, symmetric forward and reverse OVN port chains are created. The OVN ACL for the forward chain uses the logical-source-port, and the OVN ACL for the reverse chain uses the logical-destination-port. The OVN ACL for the forward chain has inport="logical-source-port.id" and OVN ACL direction=from-port. The OVN ACL for the reverse chain has inport="logical-destination-port.id" and OVN ACL direction=from-port. Implementation ============== Assignee(s) ----------- Authors of the Specification and Primary contributors: * Cathy Zhang (cathy.h.zhang@huawei.com) * Louis Fourie (louis.fourie@huawei.com) * Farhad Sunavala (farhad.sunavala@huawei.com) * John McDowall (jmcdowall@paloaltonetworks.com) networking-sfc-10.0.0/doc/source/contributor/sfc_port_chain_tap.rst0000664000175000017500000001640413656750333025570 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. 
http://creativecommons.org/licenses/by/3.0/legalcode ==================================== Service Function Tap for Port Chains ==================================== Include the URL of your launchpad blueprint: https://blueprints.launchpad.net/networking-sfc/+spec/sfc-tap-port-pair This specification describes the support for passive Service Functions in SFC Port Chains. Problem Description =================== There are some Service Functions (SF) that operate in a passive mode and only receive packets on the ingress port but do not send packets on an egress port. An example of this is a Service Function that has an Intrusion Detection Service (IDS). In order to include such a SF in a port chain, the packets must be delivered to this SF and also forwarded on to the next downstream SF in the port chain. Proposed Changes ================ The Port Pair Group port-pair-group-parameter attribute allows service specific configuration to be applied to all Service Functions (Port Pairs) in a Port Pair Group. The port-pair-group-parameter will be enhanced to add a "tap-enabled" field. The "tap-enabled" field will apply to all Service Functions in the Port Pair Group. This field is set to "true" to indicate that the data-plane switch behavior will be to send the packets to the ingress port of the SF and also forward these packets to the next hop SF. Each Port Pair in the Port Pair Group will act as a tap by passing packets to the passive SF and also forwarding these packets to the next downstream SF. This Port Pair will only send packets to the ingress port of the SF and not receive any packets from the egress port of the SF. If "tap-enabled" is set to "false" or is not present then default behavior will occur. The tap may be applied at any hop (Port Pair Group) in a Port Chain. Every hop in a Port Chain may be configured as a tap. 
OVS Driver Implementation ------------------------- If a SF is configured as a tap the OVS Integration bridge will add a tap to replicate packets received from upstream SFs. One copy is sent to the ingress port (P1) of the passive Service Function (SF 1 on VM1). The other copy is sent to the ingress port (P2) of the next downstream Service Function (SF 2 on VM2). :: Compute Node +--------------------------------------------------+ | VM1 VM2 | | +--------------------+ +--------------------+ | | | Service Function 1 | | Service Function 2 | | | | (Passive) | | | | | +--------------------+ +--------------------+ | | P1 |^ P2 |^ P3 |. | | |. |. |. | | |. |. |. | | +----------.-------------------.--------.----+ | | | Tap. . . | | | | ...>....x.........>.......... ...> | | | | | | | | OVS Integration | | | | Bridge | | | +--------------------------------------------+ | | | +--------------------------------------------------+ The tap will work regardless of whether the next hop SF is hosted on the same Compute node as the tap Port Pair as shown above or on another Compute node as shown below. :: Compute Node 1 Compute Node 2 +-------------------------+ +-------------------------+ | VM1 | | VM2 | | +--------------------+ | | +--------------------+ | | | Service Function 1 | | | | Service Function 2 | | | | (Passive) | | | | | | | +--------------------+ | | +--------------------+ | | P1 |^ | | P2 |^ P3 |. | | |. | | |. |. | | |. | | |. |. | | +----------.---------+ | | +------.--------.----+ | | | Tap. | | | | . . | | | | ...>....x........ | | | | ...... ..> | | | | . | | | | . | | | | OVS Integration . | | | | . OVS Integration | | | | Bridge . | | | | . Bridge | | | +------------------.-+ | | +-.------------------+ | | . | | . | +---------------------.---+ +---.---------------------+ ............. Workflow & OVS working details for Tap SF ----------------------------------------- Tap SFs are deployed to monitor/analyze traffic of a network segment. 
These SFs receive copy of the packet coming out from egress port of default SFs or any logical ports (source/destination) of a service chain. Steps for Tap Port Pair and Port Pair Group creation: 1. Create Port openstack sfc port create --name p1 net1 2. Create Port Pair openstack sfc port pair create tap_pp --ingress p1 --egress p1 3. Create Port Pair Group openstack sfc port pair group create tap_ppg --port-pair tap_pp --tap-enabled=True Apart from sending packet to next-hop SF, the egress port-chain flow in Local Switching Table sends a copy of packet to TAP_CLASSIFIER_TABLE using RESUBMIT action, which does further processing on the Tap destined packet. Following tables are introduced to process Tap destined traffic: 1. TAP_CLASSIFIER_TABLE (Table 7) - This table classifies traffic based on source mac of SF egress port or any logical port and the IP header (MPLS or IP). VLAN tagging and MPLS encapsulation is done on the packet to send to Tap SF. Based on the location of Tap SF, if on same compute node, action is to resubmit to INGRESS_TABLE. If located on another compute node, action is to output packet to tunnel patch port. 2. TAP_TUNNEL_OUTPUT_TABLE (Table 25) - This table belongs to tunnel bridge or 'br-tun'. This table contains the flows which floods Tap SF destined packets to the tunnel ports. Alternatives ------------ None Data model impact ----------------- Add "tap-enabled" to the Port Pair Group parameter. The "tap-enabled" field is set to "true" to enable the tap feature. The "tap-enabled" field is set to "false" to disable the tap feature. REST API impact --------------- Add "tap-enabled": "true" to the port-pair-group-parameter. Security impact --------------- None Notifications impact -------------------- None Other end user impact --------------------- None Performance Impact ------------------ None Other deployer impact --------------------- None. Developer impact ---------------- None. 
Implementation ============== Assignee(s) ----------- * Cathy Zhang (cathy.h.zhang@huawei.com) * Louis Fourie (louis.fourie@huawei.com) * Farhad Sunavala (farhad.sunavala@huawei.com) * Vikash Kumar (vikash.kumar@oneconvergence.com) Work Items ---------- 1. Extend API port-pair-group-parameter to support "tap-enabled" field. 2. Extend 'networking-sfc' OVS driver to support "tap-enabled" field. 3. Add unit and functional tests. 4. Update documentation. Dependencies ============ None Testing ======= Unit tests and functional tests will be added. Documentation Impact ==================== None References ========== None networking-sfc-10.0.0/doc/source/contributor/ovs_driver_and_agent_workflow.rst0000664000175000017500000002600513656750333030055 0ustar zuulzuul00000000000000.. Copyright 2015 Futurewei. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) ============================= OVS Driver and Agent Workflow ============================= Blueprint about `Common Service chaining driver `_ describes the OVS driver and agent necessity for realizing service function chaining. Problem Description =================== The service chain OVS driver and agents are used to configure back-end Openvswitch devices to render service chaining in the data-plane. 
The driver manager controls a common service chain API which provides a consistent interface between the service chain manager and different device drivers. Proposed Change =============== Design:: Port Chain Plugin +-------------------------------+ | +-------------------------+ | | | Port Chain API | | | +-------------------------+ | | | Port Chain Database | | | +-------------------------+ | | | Driver Manager | | | +-------------------------+ | | | Common Driver API | | | +-------------------------+ | | | | | +-------------------------+ | | | OVS Driver | | | +-------------------------+ | +-------|----------------|------+ |rpc |rpc +-----------+ +-----------+ | OVS Agent | | OVS Agent | +-----------+ +-----------+ A OVS service chain driver and agents communicate via rpc. OVS Driver ---------- The OVS Driver is extended to support service chaining. The driver interfaces with the OVS agents that reside on each Compute node. The OVS driver is responsible for the following: * Identify the OVS agents that directly connects to the SF instances and establish communication with OVS agents on the Compute nodes. * Send commands to the OVS agents to create bridges, flow tables and flows to steer chain traffic to the SF instances. OVS Agent --------- The OVS agent will manage the OVS using OVSDB commands to create bridges and tables, and install flows to steer chain traffic to the SF instances. Existing tunnels between the Tunnel bridges on each Compute node are used to transport Port Chain traffic between the CNs. The OVS Agent will create these tunnels to transport SFC traffic between Compute nodes on which there are SFs. Each tunnel port has the following attributes: * Name * Local tunnel IP address * Remote tunnel IP address * Tunnel Type: VXLAN, GRE The OVS agent installs additional flows on the Integration bridge and the Tunnel bridge to perform the following functions: * Traffic classification. 
The Integration bridge classifies traffic from a VM port or Service VM port attached to the Integration bridge. The flow classification is based on the n-tuple rules. * Service function forwarding. The Tunnel bridge forwards service chain packets to the next-hop Compute node via tunnels, or to the next Service VM port on that Compute node. Integration bridge will terminate a Service Function Path. The OVS Agent will use the MPLS header to transport the chain path identifier and chain hop index. The MPLS label will transport the chain path identifier, and the MPLS ttl will transport the chain hop index. The following packet encapsulation will be used:: IPv4 Packet: +----------+------------------------+-------+ |L2 header | IP + UDP dst port=4790 | VXLAN | +----------+------------------------+-------+ -----------------------------+---------------+--------------------+ Original Ethernet, ET=0x8847 | MPLS header | Original IP Packet | -----------------------------+---------------+--------------------+ This is not intended as a general purpose MPLS implementation but rather as a temporary internal mechanism. It is anticipated that the MPLS label will be replaced with an NSH encapsulation (https://datatracker.ietf.org/doc/draft-ietf-sfc-nsh/) once NSH support is available upstream in Open vSwitch. If the service function does not support the header, then the vSwitch will act as Service Function Forwarder (SFF) Proxy which will strip off the header when forwarding the packet to the SF and re-add the header when receiving the packet from the SF. OVS Bridge and Tunnel --------------------- Existing tunnels between the Tunnel bridges on each Compute node are used to transport Port Chain traffic between the CNs:: CN1 CN2 +--------------------------+ +-------------------------+ | +-----+ +-----+ | | +-----+ +-----+ | | | VM1 | | SF1 | | | | SF2 | | SF3 | | | +-----+ +-----+ | | +-----+ +-----+ | | |. ^|. | | ^| |. ^|. 
| | +----.-----------.-.--+ | | +-.---.---------.-.---+ | | | ............. .. | | | | . ........... . | | | | Integration Bridge. | | | | .Integration Bridge | | | | ......... | | | | ...... ........ | | | +-----------.---------+ | | +-------.--.----------+ | | |. | | .| . | | +-----------.---------+ | | +-------.--.----------+ | | | ................................. ..................> | | Tunnel Bridge |-------------| Tunnel Bridge | | | +---------------------+ | Tunnel | +---------------------+ | | | | | +--------------------=-----+ +-------------------------+ Flow Tables and Flow Rules -------------------------- The OVS Agent adds additional flows (shown above) on the Integration bridge to support Port Chains: 1. Egress Port Chain flows to steer traffic from SFs attached to the Integration bridge to a Tunnel bridge to the next-hop Compute node. These flows may be handled using the OpenFlow Group in the case where there are multiple port-pairs in the next-hop port-pair group. 2. Ingress Port Chain flows on the Tunnel bridge to steer service chain traffic from a tunnel from a previous Compute node to SFs attached to the Integration bridge. 3. Internal Port Chain flows are used to steer service chain traffic from one SF to another SF on the same Compute Node. The Port Chain flow rules have the higher priority, and will not impact the existing flow rules on the Integration bridge. If traffic from SF is not part of a service chain, e.g., DHCP messages, ARP packets etc., it will match the existing flow rules on the Integration bridge. The following tables are used to process Port Chain traffic: * Local Switching Table (Table 0). This existing table has two new flows to handle incoming traffic from the SF egress port and the tunnel port between Compute nodes. * Group Table. This new table is used to select multiple paths for load-balancing across multiple port-pairs in a port-pair group. 
There are multiple buckets in the group if the next hop is a port-pair group with multiple port-pairs. The group actions will be to send the packet to next hop SF instance. If the next hop port-pair is on another Compute node, the action output to the tunnel port to the next hop Compute node. If the next hop port-pair is on the same Compute node, then the action will be to resubmit to the TUN_TABLE for local chaining process. Local Switching Table (Table 0) Flows ------------------------------------- Traffic from SF Egress port: classify for chain and direct to group:: priority=10,in_port=SF_EGRESS_port,traffic_match_field, actions=strip_vlan,set_tunnel:VNI,group:gid. Traffic from Tunnel port:: priority=10,in_port=TUNNEL_port, actions=resubmit(,TUN_TABLE[type]). Group Table Flows ----------------- The Group table is used for load distribution to spread the traffic load across a port-pair group of multiple port-pairs (SFs of the same type). This uses the hashing of several fields in the packet. There are multiple buckets in the group if the next hop is a port-pair group with multiple port-pairs. The group actions will be to send the packet to next hop SF instances. If the next hop port-pair is on another Compute node, the action output to the tunnel port to the next hop Compute node. If the next hop port-pair is on the same Compute node, then the action will be to resubmit to the TUN_TABLE for local chaining process. The OVSDB command to create a group of type Select with a hash selection method and two buckets is shown below. This is existing OVS functionality. The ip_src,nw_proto,tp_src packet fields are used for the hash:: group_id=gid,type=select,selection_method=hash,fields=ip_src,nw_proto,tp_src bucket=set_field:10.1.1.3->ip_dst,output:10, bucket=set_field:10.1.1.4->ip_dst,output:10 Data Model Impact ----------------- None Alternatives ------------ None Security Impact --------------- None. 
Notifications Impact -------------------- There will be logging to trouble-shoot and verify correct operation. Other End User Impact --------------------- None. Performance Impact ------------------ It is not expected that these flows will have a significant performance impact. IPv6 Impact ----------- None. Other Deployer Impact --------------------- None Developer Impact ---------------- None Community Impact ---------------- Existing OVS driver and agent functionality will not be affected. Implementation ============== Assignee(s) ----------- * Cathy Zhang (cathy.h.zhang@huawei.com) * Louis Fourie (louis.fourie@huawei.com) * Stephen Wong (stephen.kf.wong@gmail.com) Work Items ---------- * Port Chain OVS driver. * Port Chain OVS agent. * Unit test. Dependencies ============ This design depends upon the proposed `Neutron Service Chaining API extensions `_ Openvswitch. Testing ======= Tempest and functional tests will be created. Documentation Impact ==================== Documented as extension. User Documentation ------------------ Update networking API reference. Update admin guide. Developer Documentation ----------------------- None networking-sfc-10.0.0/doc/source/contributor/ietf_sfc_encapsulation.rst0000664000175000017500000007303113656750333026451 0ustar zuulzuul00000000000000.. Copyright 2017 Intel Corporation. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. IETF SFC Encapsulation ====================== This section explains SFC Encapsulation support in networking-sfc. 
The link to Launchpad at [4] is an umbrella for SFC Encapsulation work with the following scope: * MPLS correlation support (labels exposed to SFs) * Service Graphs allowing port-chains to be linked together * The IETF SFC Encapsulation protocol, NSH (exposed to SFs), support * No NSH Metadata support SFC Encapsulation is an architectural concept from IETF SFC, which states [1]: *"The SFC Encapsulation provides, at a minimum, SFP identification, and is used by the SFC-aware functions, such as the SFF and SFC-aware SFs. The SFC encapsulation is not used for network packet forwarding. In addition to SFP identification, the SFC Encapsulation carries metadata including data-plane context information."* Metadata is a very important capability of SFC Encapsulation, but it's out of scope for this umbrella of work in networking-sfc. Correlation is the term used to correlate packets to chains, in essence it is the Service Function Path (SFP) information that is part of the SFC Encapsulation. Correlation can be MPLS or NSH (SFC Encapsulation). To clarify, MPLS correlation cannot be strictly called SFC Encapsulation since it doesn't fully encapsulate the packets, amongst other limitations such as available space to carry metadata [1]. However, since it can be used for Service Function Path identification, it is a good workaround to exercise the IETF SFC Encapsulation architectural concept in networking-sfc, when NSH is not desired. Service Graphs is a concept mentioned in [1] but further defined and refined in [5] that builds on top of Reclassification and Branching (from [1]). Service Graphs make use of the full encapsulation of frames the SFC Encapsulation provides, and the Service Function Path information that is carried by it, to create dependencies between SFPs, making sure that there's no "leakage" of frames between paths. 
The figure below outlines the key elements in a Service Graph:: Branch1 Join1 pc1 --+--> pc2 ------> pc4 | ^ | | --> pc3 --- Branch1: pc1 = initial (source) pc2 = destination pc3 = destination Join1: pc2 = source pc3 = source pc4 = destination Since Port Chains resemble Service Function Paths, with the ``chain_id`` attribute mapping to a Service Path Identifier (SPI), they are used as the SFPs for the Service Graph, and consequently Service Graphs in networking-sfc allow the creation of dependencies between Port Chains (alongside traffic classification criteria, just like a normal Port Chain, via Flow Classifier). Terminology ----------- * **Branching Point**: Or branch point, is a point in a Service Graph that leads to new SFPs. * **Correlation**: Related to SFC Encapsulation, but focused on the fact that a Port Chain (an **SFP**) will be mapped to a unique identifier (the **SPI**) and that the hops of that chain will also have a unique index associated (the **SI**), with the forwarding of traffic based on those two parameters. * **Destination Chain**: A Port Chain that branches from a previous chain (the **Source Chain**), i.e. a dependent chain. A Destination Chain may also be a **Source Chain**. For traffic to be accepted into a Destination Chain, it has to have come from the **Source Chains** that the Destination Chain depends on plus the Destination Chain's own flow classifier (except logical source ports, which will be ignored as that would clash with the traffic coming out of respective Source Chains). * **Initial Chain**: A Port Chain that is not a **Destination Chain**, but may be a **Source Chain** if it's included in a Service Graph. In other words, this chain only matches on a Flow Classifier and takes into account the Logical Source Port defined by it (unlike **Destination Chains**). * **Joining Point**: A point in a Service Graph that merges multiple incoming branches (**Source Chains**) into the same **Destination Chain**. 
* **NSP**: Network Service Path (same as **SPI**). * **NSI**: Network Service Index (same as **SI**). * **SFP**: Service Function Path. * **SI**: Service Index. * **Source Chain**: The Port Chain that provides a branching point to Destination Chains. A Source Chain may also be an **Initial Chain** or a **Destination Chain**. Traffic that leaves a Source Chain, i.e. the egressing traffic from the last SF of the chain (and encapsulated for that particular chain) will be put into either one or no Destination Chains respective to this Source Chain, depending on whether the flow classifiers of the Destination Chains successfully match on the egressing traffic of the Source Chain. * **SPI**: Service Path Identifier (numerically identifies an **SFP**). Usage ----- In order to create Port Chains with Port Pairs that make use of the NSH correlation (i.e. the Network Service Header (NSH) is exposed to the SFs, so no SFC Proxy is logically instantiated by the networking-sfc backend), the Port Pair's ``correlation`` service function parameter can be used, by setting it to ``nsh`` (default is set to ``None``): ``service_function_parameters: {correlation: 'nsh'}`` Alternatively, the MPLS correlation can be used as a workaround to NSH: ``service_function_parameters: {correlation: 'mpls'}`` Enabling the MPLS correlation doesn't fully encapsulate frames like NSH would, since the MPLS labels are inserted between the Ethernet header and the L3 protocol. By default, port-chains always have their correlation set to ``mpls``: ``chain_parameters: {correlation: 'mpls'}`` A Port Chain can have Port Pair Groups with MPLS-correlated Port Pairs or Port Pairs with no correlation. However, each Port Pair Group can only group Port Pairs that share the same correlation type (to process each hop and expose their feature set in a consistent and predictable way). The SFC OVS driver and agent are smart enough to only apply SFC Proxies to the hops that require so. 
The MPLS correlation is only recommended when using SFC-proxied Port Pair Groups. In order to use NSH, the Port Chain correlation must be set to ``nsh`` (to clarify, SFC Proxies can also be used with NSH Port Chains, as long as the Port Pairs have no correlation set): ``chain_parameters: {correlation: 'nsh'}`` To create a Service Graph, first create the set of Port Chains that will compose the Service Graph. Then, create the Service Graph itself by referencing the Port Chains needed as a dictionary of source to (list of) destination chains, essentially describing each of the branching points of the chain. The following example, using the OpenStack Client, illustrates this (by creating a graph that starts from an initial chain ``pc1`` which forks into ``pc2`` and ``pc3``, and then joins back into a single chain ``pc4`` (if that's what the user intended) using the MPLS correlation (if using NSH, the flows are equivalent but OpenFlow NSH actions and matches are used instead):: # we assume that the Neutron ports p0..p4 are already created and bound $ openstack sfc port pair create --ingress p1 --egress p1 --service-function-parameters correlation=mpls pp1 $ openstack sfc port pair create --ingress p2 --egress p2 --service-function-parameters correlation=mpls pp2 $ openstack sfc port pair create --ingress p3 --egress p3 --service-function-parameters correlation=mpls pp3 $ openstack sfc port pair create --ingress p4 --egress p4 --service-function-parameters correlation=mpls pp4 $ openstack sfc port pair group create --port-pair pp1 ppg1 $ openstack sfc port pair group create --port-pair pp2 ppg2 $ openstack sfc port pair group create --port-pair pp3 ppg3 $ openstack sfc port pair group create --port-pair pp4 ppg4 $ openstack sfc flow classifier create --protocol udp --source-port 2001 --logical-source-port p0 fc1 $ openstack sfc flow classifier create --protocol udp --source-port 2002 --logical-source-port p0 fc2 $ openstack sfc flow classifier create --protocol udp 
--source-port 2003 --logical-source-port p0 fc3 $ openstack sfc flow classifier create --protocol udp --source-port 2004 --logical-source-port p0 fc4 $ openstack sfc port chain create --port-pair-group ppg1 --flow-classifier fc1 --chain-parameters correlation=mpls pc1 $ openstack sfc port chain create --port-pair-group ppg2 --flow-classifier fc2 --chain-parameters correlation=mpls pc2 $ openstack sfc port chain create --port-pair-group ppg3 --flow-classifier fc3 --chain-parameters correlation=mpls pc3 $ openstack sfc port chain create --port-pair-group ppg4 --flow-classifier fc4 --chain-parameters correlation=mpls pc4 $ openstack sfc service graph create --branching-point pc1:pc2,pc3 --branching-point pc2:pc4 --branching-point pc3:pc4 sg1 In the Python language, the dictionary of Port Chains provided above via the OpenStack Client would look like this:: { 'port_chains': { 'pc1': ['pc2', 'pc3'], 'pc2': ['pc4'], 'pc3': ['pc4'] } } Note that, because pc2, pc3 and pc4 depend on other chains, their Flow Classifiers' Logical Source Ports will be ignored. 
To clarify what happens under the hood when using the Open vSwitch driver, let's look at the relevant flows that are generated for the above example: **Table 0**:: priority=30,udp,tp_src=2001,in_port=10 actions=push_mpls:0x8847,set_field:511->mpls_label,set_mpls_ttl(255),group:1 priority=30,udp,tp_src=2002,reg0=0x1fe actions=push_mpls:0x8847,set_field:767->mpls_label,set_mpls_ttl(255),group:2 priority=30,udp,tp_src=2003,reg0=0x1fe actions=push_mpls:0x8847,set_field:1023->mpls_label,set_mpls_ttl(255),group:3 priority=30,udp,tp_src=2004,reg0=0x2fe actions=push_mpls:0x8847,set_field:1279->mpls_label,set_mpls_ttl(255),group:4 priority=30,udp,tp_src=2004,reg0=0x3fe actions=push_mpls:0x8847,set_field:1279->mpls_label,set_mpls_ttl(255),group:4 priority=30,mpls,in_port=11,mpls_label=510 actions=load:0x1fe->NXM_NX_REG0[],pop_mpls:0x0800,resubmit(,0) priority=30,mpls,in_port=12,mpls_label=766 actions=load:0x2fe->NXM_NX_REG0[],pop_mpls:0x0800,resubmit(,0) priority=30,mpls,in_port=13,mpls_label=1022 actions=load:0x3fe->NXM_NX_REG0[],pop_mpls:0x0800,resubmit(,0) priority=30,mpls,in_port=14,mpls_label=1278 actions=pop_mpls:0x0800,NORMAL **Table 5**: (usual flows for sending to table 10 or across tunnel, without proxying) **Table 10**: (usual flows to make traffic ingress into the Service Functions, shown below):: priority=1,mpls,dl_vlan=1,dl_dst=fa:16:3e:97:91:a2,mpls_label=511 actions=pop_vlan,output:11 priority=1,mpls,dl_vlan=1,dl_dst=fa:16:3e:87:2a:ad,mpls_label=767 actions=pop_vlan,output:12 priority=1,mpls,dl_vlan=1,dl_dst=fa:16:3e:77:59:f1,mpls_label=1023 actions=pop_vlan,output:13 priority=1,mpls,dl_vlan=1,dl_dst=fa:16:3e:34:07:f5,mpls_label=1279 actions=pop_vlan,output:14 **Groups Table**: (usual flows for load-balancing and re-writing the destination MAC addresses) Considering that the OF port 10 is p0, 11 is p1, and so on with 14 being p4, there are three important things to notice from the Service Graphs flows above: * At the end of the Source Chains (pc1, pc2 and 
pc3), instead of the typical flow (in table 0) that would remove the MPLS shim (with ``pop_mpls``) and then use the NORMAL action, the chain's SFP information is written to a register (e.g. ``actions=load:0x1fe->NXM_NX_REG0[]``) and the packet is sent back to the same table to be matched by a Destination Chain. * At the beginning of the Destination Chains (pc2, pc3 and pc4), instead of the typical flow (in table 0) that would match solely on the Flow Classifier (specifically the ingress OF port that comes from the Logical Source Port together with the actual traffic classification definition), a specific SFP information register value will be matched on (e.g. ``reg0=0x1fe``) together with the traffic classification definition from the Flow Classifier but no OF ingress port will be used (i.e. Logical Source Port ignored). * For the case of Joining Points, where a chain is Destination to multiple Source Chains, there will be one flow matching on the register value per Source Chain, the only difference in the entire flow being the value of that register (reflecting each of the Source Chains' SFP infos). Two flows can be seen above in table 0, matching on traffic meant for pc4. Implementation -------------- PPG/SF Correlation ~~~~~~~~~~~~~~~~~~ At the API side, both MPLS and NSH correlations are defined as possible options (values) to the ``correlation`` key in the ``service_function_parameters`` field of the ``port_pair`` resource. Furthermore, Port Pair Groups must include Port Pairs of the same correlation type. The parameter is saved in the database in the same way as any other port-pair parameter, inside the ``sfc_service_function_params`` table (example for NSH):: keyword='correlation' value='nsh' pair_id=PORT_PAIR_UUID The NSH correlation parameter will eventually be fed to the enabled backend, such as Open vSwitch. 
Through the OVS SFC driver and agent, the vswitches on the multiple nodes where networking-sfc is deployed will be configured with the set of flows that allow classification, encapsulation, decapsulation and forwarding of MPLS tagged or untagged packets. Applying the IETF SFC view to this, Open vSwitch switches thus implement the logical elements of Classifier, Service Function Forwarder (SFF) and SFC Proxy (stateless) [1]. In networking-sfc, the OVS driver talks to the agents on the multiple compute nodes by sending "flow rule" messages to them across the RPC channels. In flow rules, correlation parameters of both port-chains and port-pairs are specified using the ``pc_corr`` and ``pp_corr`` flow rule keys, respectively. Moreover, a ``pp_corr`` key is also specified in each of the hops of the ``next_hops`` flow rule key. Remember: a port-pair-group contains port-pairs that all share the same correlation type, so the comparison between ``pc_corr`` and each of the ``pp_corr`` of the next hops will yield the same result. ``pc_corr`` is the correlation mechanism (SFC Encapsulation) to be used for the entire port-chain. The values may be ``None``, ``'mpls'``, or ``'nsh'``. ``pp_corr`` is the correlation mechanism supported by an individual SF. The values may be ``'None'``, ``'mpls'``, or ``'nsh'``. The backend driver compares ``pc_corr`` and ``pp_corr`` to determine if SFC Proxy is needed for a SF that is not capable of processing the SFC Encapsulation mechanism. For example, if ``pc_corr`` is ``'mpls'`` and ``pp_corr`` is ``None``, then SFC Proxy is needed. 
The following is an example of an sf_node flow rule (taken from one of the SFC OVS agent's unit tests):: 'nsi': 255, 'ingress': '6331a00d-779b-462b-b0e4-6a65aa3164ef', 'next_hops': [{ 'local_endpoint': '10.0.0.1', 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'mac_address': '12:34:56:78:cf:23', 'pp_corr': 'nsh' }], 'del_fcs': [], 'group_refcnt': 1, 'node_type': 'sf_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': 1, 'nsp': 256, 'add_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'pc_corr': 'nsh', 'pp_corr': 'nsh', 'id': uuidutils.generate_uuid() It can be seen that ``'nsh'`` appears three times in the flow rule, twice in the root (specifying the correlation of port-chain and port-pair of the current hop) and once inside the single hop of ``next_hops``, regarding its port-pair. The three appearances will dictate how flows (both matches and actions) will be added by the OVS agent. 
Let's take a look at the possible scenarios: +-+------------------+------------------+-----------------------------------------+ | | Curr Hop pp_corr | Next Hop pp_corr | Action | +=+==================+==================+=========================================+ |1| NSH/MPLS | NSH/MPLS | Egress from SF: match on NSH/MPLS | | | | | to determine next hop | | | | | Ingress to next SF: send NSH/MPLS to SF | +-+------------------+------------------+-----------------------------------------+ |2| NSH/MPLS | None | Egress from SF: match on NSH/MPLS | | | | | to determine next hop | | | | | Ingress to next SF: pop NSH/MPLS first | +-+------------------+------------------+-----------------------------------------+ |3| None | NSH/MPLS | Egress from SF: reclassify packet | | | | | and add new NSH/MPLS | | | | | Ingress to next SF: send NSH/MPLS to SF | +-+------------------+------------------+-----------------------------------------+ |4| None | None | Egress from SF: reclassify packet | | | | | and add new NSH/MPLS | | | | | Ingress to next SF: pop NSH/MPLS first | +-+------------------+------------------+-----------------------------------------+ An important point to make is that correlations cannot be mixed, i.e. if the Port Chain uses the MPLS correlation, then its PPGs cannot include Port Pairs using the NSH correlation, and vice-versa. So, on the table above, consider either NSH or MPLS for any given row, but not both. The following further explains each of the possibilities from the table above. To simplify, the NSH correlation is considered (MPLS is equivalent here). 1. **pp_corr=nsh and every next_hop's pp_corr=nsh** The ingress of this sf_node will not remove the NSH. When egressing from this sf_node, OVS will not attempt to match on the flow_classifier defined in ``add_fcs``, but rather the expected NSH after the SF is done processing the packet (the NSI is supposed to be decremented by 1 by the SF). 
When preparing the packet to go to the next hop, no attempt at inserting NSH will be done, since the packet already has the correct labels. 2. **pp_corr=nsh and every next_hop's pp_corr=None** The ingress of this sf_node will not remove the NSH. When egressing from this sf_node, OVS will not attempt to match on the flow_classifier defined in ``add_fcs``, but rather the expected NSH after the SF is done processing the packet (the NSI is supposed to be decremented by 1 by the SF). When preparing the packet to go to the next hop, no attempt at inserting NSH will be done, since the packet already has the correct labels. The next hop's own flow rule (not the one shown above) will have an action to first remove the NSH and then forward to the SF. 3. **pp_corr=None and every next_hop's pp_corr=nsh** The ingress of this sf_node will first remove the NSH and then forward to the SF, as its actions. When egressing from this sf_node, OVS will match on the flow-classifier defined in ``add_fcs``, effectively implementing an SFC Proxy and running networking-sfc's "classic" mode. When preparing the packet to go to the next hop, a new NSH needs to be inserted. This is done on Table 0, the same table where ``add_fcs`` was matched. Right before the packets are submitted to the Groups Table, they receive the expected NSH for the next hop. The reason why this can't be done on the ``ACROSS_SUBNET_TABLE`` like when the next_hop's correlation is set to None, is the fact that the choice of labels would be ambiguous. If multiple port-chains share the same port-pair-group at a given hop, then encapsulating/adding NSH as one of ``ACROSS_SUBNET_TABLE``'s actions means that at least one of port-chains will be fed the wrong label and, consequently, leak into a different port-chain. This is due to the fact that, in ``ACROSS_SUBNET_TABLE``, the flow matches only on the destination MAC address of the frame (and that isn't enough to know what chain the frame is part of). 
So, again, the encapsulation/adding of NSH will have to be done in Table 0 for this specific scenario where in the current hop the packets don't have labels but on the next hop they are expected to. 4. **pp_corr=None and every next_hop's pp_corr=None** This is "classic" networking-sfc. The ingress of this sf_node will first remove the NSH and then forward to the SF, as its actions. When egressing from this sf_node, OVS will match on the flow-classifier defined in ``add_fcs`` effectively implementing an SFC Proxy and running networking-sfc's "classic" mode. When preparing the packet to go to the next hop, a new NSH needs to be inserted, which is done at the ``ACROSS_SUBNET_TABLE``, after a destination port-pair has been chosen with the help of the Groups Table. Service Graphs ~~~~~~~~~~~~~~ At the API side, Service Graphs are presented as a specific resource called ``service_graph``. Besides the attributes ``id``, ``name``, ``description`` and ``project_id``, this resource expects to have a dictionary called ``port_chains`` that maps source chains to (lists of) destination chains. Service Graphs "glue" existing Port Chains, creating dependencies between them, in effect changing the criteria to get into each of the chains by not relying solely on the Flow Classifier anymore (except for the initial chain of the graph). Traffic entering a destination chain of a Service Graph is dependent on its source chain and its own flow classifiers. In the database, Service Graphs are stored as 2 tables: * ``sfc_service_graphs``: This table stores the independent data of each of the Service Graph resources, specifically the name, description and project ID. * ``sfc_service_graph_chain_associations``: This table stores the actual associations between Service Graphs and Port Chains, stating which ones are source chains and which ones are destination chains. 
Besides the ``service_graph_id`` field (primary key, and foreign key to ``sfc_service_graphs.id``), there are the ``src_chain`` and the ``dst_chain`` fields, each pointing to an ID of a Port Chain, both being foreign keys to ``sfc_port_chains.id``. So, to represent the branching points of the example graph provided in the Usage section above, the following entries would be stored in ``sfc_service_graph_chain_associations``: +----------------+---------+---------+ |service_graph_id|src_chain|dst_chain| +----------------+---------+---------+ | SG1 ID | PC1 ID | PC2 ID | | SG1 ID | PC1 ID | PC3 ID | | SG1 ID | PC2 ID | PC4 ID | | SG1 ID | PC3 ID | PC4 ID | +----------------+---------+---------+ Some of the validations that occur at the database/plugin level are: * Port Chains can't be deleted if they are in use by a graph. * Port Chains can't be updated (to include a different set of Port Pair Groups) if they are in use by a graph. * Service Graphs can't have Port Chain loops or circular paths. * A Port Chain can't be added twice as destination of the same source chain (that would essentially replicate packets). * Port Chains cannot be part of more than one graph at any given time. * Branching points have to support a correlation protocol (MPLS or NSH). * The correlation protocol has to be the same for every included Port Chain. * For a given branching point (destination chain), the traffic classification of each branch has to be different to prevent ambiguity. At the OVS driver level, all of the logic takes place in the postcommit methods, ``create_service_graph_postcommit`` and ``delete_service_graph_postcommit``. At present time, the dictionary of Port Chains that a Service Graph references cannot be updated and, as such, the drivers (not just OVS) don't have to support the update operation. In essence, the OVS driver will look at the ``port_chains`` dictionary of the graph and generate flow rules for every branching point. 
Each branching point includes both the last path node (the last ``sf_node``) of the respective source chain and each first path node (the ``src_node``) of the respective destination chains. All of these flow rules are meant to replace the flows that the original flow rules (during creation of the Port Chains themselves) had requested the agent to create. The flow rules for the source chains will include a special attribute called ``branch_point``, set to the value of ``True``. This indicates to the agent that this path node's (expected to be the last ``sf_node`` of that chain) NSP and NSI should be saved so that the destination chains can match on them while doing the normal traffic classification (via their own Flow Classifiers). Example:: 'branch_point': True The flow rules for the destination chains will include a special attribute called ``branch_info``, a dictionary with two keys: ``matches`` and ``on_add``. Example:: 'branch_info': { 'matches': set([(2, 254), (3, 254)]), 'on_add': True } ``matches`` contains a set of tuples with the NSP and NSI (``(<NSP>, <NSI>)``) to be matched by the particular destination chain. ``on_add`` simply specifies whether the ``matches`` should be used when adding the flow or otherwise when removing the flow - in very much the same fashion as ``add_fcs``/``del_fcs`` for the Flow Classifiers, except that here it's either adding or removing the NSP/NSI matches and never replacing/updating them. For source chains' ``branch_point`` there is no need to have an ``on_add`` since the OpenFlow matches will not change depending on whether we are removing or adding this branch point. Only the actions will change (for relevant flows in Table 0). At the OVS agent level, ``branch_point`` and ``branch_info`` are interpreted in order to generate the appropriate set of flows, replacing the ones originally created by the constituent Port Chains (to clarify, only the flows at the branching points). 
``'branch_point': True`` will tell the agent to replace the egress flow from the last ``sf_node``, in Table 0, with a new one whose actions will be to: * copy the NSP and NSI from the MPLS label or NSH into a register: ``reg0``; * remove the MPLS label or NSH; * send the traffic back to Table 0, now without MPLS/NSH but with ``reg0`` set. Example of this flow (using MPLS correlation):: table=0,priority=30,mpls,in_port=8,mpls_label=509 actions=load:0x1fd->NXM_NX_REG0[],pop_mpls:0x0800,resubmit(,0) When ``branch_info`` is set, with ``'on_add': True`` and ``'matches': set([(1, 253)])``, the agent will replace the egress flow from the ``src_node`` of the destination chain that is specified in the flow rule, in Table 0, with a different set of matches from a typical ``src_node``: * it will still match on what the Flow Classifiers specify; * but the logical source port match is ignored (there is no ``in_port=X``); * most importantly, it will match on a specified value of ``reg0`` (NSP/NSI). Example of this flow (using MPLS correlation):: table=0,priority=30,udp,reg0=0x1fd actions=push_mpls:0x8847,set_field:767->mpls_label,set_mpls_ttl(255),group:3 With ``'on_add': False``, the agent will replace the above flow with the original flow for the ``src_node`` of that Port Chain, matching only on the Flow Classifiers' fields. Known Limitations ----------------- * Service Graphs are not compatible with Symmetric Port Chains at the moment. Furthermore, Service Graphs are unidirectional; * The MPLS correlation protocol does not provide full frame encapsulation, so the SFC Encapsulation NSH protocol should be used instead; * Every Port Chain has to have a different set of Flow Classifiers, even if the logical source ports are different, even when they are attached to Service Graphs. 
This is necessary when deploying Port Chains that have Port Pairs with no correlation protocol (to prevent per-hop classification ambiguity), but is a limitation otherwise and hasn't been addressed yet; * SI/NSI is only available at the Open vSwitch driver level, meaning that the networking-sfc API can't consistently manage and persist all of the SFP information (only SPI/NSP) independently of the driver. SI/NSI and SPI/NSP are used by the logical Service Function Forwarders (SFF) that the drivers are expected to control. References ---------- [1] https://datatracker.ietf.org/doc/rfc7665/?include_text=1 [2] http://i.imgur.com/rxzNNUZ.png [3] http://i.imgur.com/nzgatKB.png [4] https://bugs.launchpad.net/networking-sfc/+bug/1587486 [5] https://datatracker.ietf.org/doc/draft-ietf-sfc-nsh/?include_text=1 networking-sfc-10.0.0/doc/source/contributor/index.rst0000664000175000017500000000342713656750333023053 0ustar zuulzuul00000000000000.. Copyright 2015 Futurewei. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) ================= Contributor Guide ================= In the Contributor Guide, you will find information on Networking-SFC lower level programming APIs. 
There are sections that cover the core pieces of networking-sfc, including its api, command-lines, database, system-design, alembic-migration etc. There are also subsections that describe specific plugins inside networking-sfc. Finally, the developer guide includes information about testing infrastructure. Programming HowTos and Tutorials -------------------------------- .. toctree:: :maxdepth: 1 contribution alembic_migration Networking-SFC Internals ------------------------ .. toctree:: :maxdepth: 1 api system_design_and_workflow ovs_driver_and_agent_workflow sfc_ovn_driver ovs_symmetric_port_chain sfc_port_chain_tap sfc_non_transparent_sf ietf_sfc_encapsulation sfc_proxy_port_correlation networking-sfc-10.0.0/doc/source/contributor/ovs_symmetric_port_chain.rst0000664000175000017500000001317213656750333027053 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ============================================== OVS Driver and Agent for Symmetric Port Chains ============================================== Include the URL of your launchpad blueprint: https://blueprints.launchpad.net/networking-sfc/+spec/symmetric-port-chain-ovs-agent This specification describes OVS driver and agent enhancements to support symmetric Port Chains. Problem Description =================== Work to add the symmetric parameter to the Port Chain API [1] is in progress. This describes the extensions to the networking-sfc OVS driver and agent to support symmetric Port Chain paths. Proposed Changes ================ Two port chain paths are created for a symmetric Port Chain: one path for the forward direction and one for the reverse direction. The SFs in the reverse path (from destination to source) are traversed in reverse order to the SFs in the forward path (from source to destination). Forward path: SF1 ... SFn Reverse path: SFn ... 
SF1 A symmetric Port Chain is defined with the 'symmetric' attribute. Both the source and destination Logical Ports must be defined for a symmetric Port Chain. If a Port Chain terminates externally via a vrouter the vrouter port attached to the local subnet is used as the destination Logical Port. When a symmetric Port Chain is deleted both the forward and reverse paths are deleted. The steering of chain traffic in the data-plane ensures symmetry: * The source Logical Port in the flow-classifier is used to install OVS rules to match traffic for the forward path. The destination Logical Port in the flow-classifier is used to install OVS rules to match traffic for the reverse path. * Rules must be installed so that the SFs in the reverse path are traversed in reverse order to that of the forward path. * Each Port Pair Group must have a Load Balancer pair: one for the forward direction and the other for the reverse direction. In addition, to ensure that traffic in the forward and reverse directions is delivered to the same SF in a Port Pair Group, these LB pairs must use symmetric hash functions. For symmetric hashing, the source and destination fields from packet header used in the hash function of the reverse LB must be the reverse of the packet header fields used in the hash function of the forward LB. If a source field, such as the source IP address, is used as a hash field in the forward direction, the corresponding destination field, the destination IP address, must be used as the hash field in the reverse direction. The example below shows a symmetric Port Chain that has a forward path and a symmetric reverse path. The Port Chain transits Port Pair Group 1 and Port Pair Group 2. PPG1 consists of service functions SF1a - SF1c, and PPG2 has service functions SF2a - SF2d. Classification rule CLf matches traffic from the source Logical Port and steers it to the forward path. 
Classification rule CLr matches traffic from the destination Logical Port and steers it to the reverse path. Port Pair Group 1 has a pair of Load Balancers, LB1f to load balance traffic in the forward direction, and LB1r to load balance traffic in the reverse direction. Port Pair Group 2 also has a pair of Load Balancers, LB2f and LB2r. LB1f hashes a certain forward traffic flow to SF1c, and LB1r, using symmetric hashing, hashes the reverse traffic for the same flow to the same SF, SF1c. Similarly, LB2f hashes that forward traffic flow to SF2a, and LB2r hashes the reverse traffic for the same flow to SF2a. :: Port Pair Port Pair Group 1 Group 2 Reverse path ................... +----+ +----+ Forward path . . |SF1a| ----->|SF2a|----------------------- v . | | | +----| |<.... | +---+ +---+----+ . +----+ ....|.|LB1r+----+ . | |VM1|->|CLf|LB1f|-- . |SF1b| . | +----|SF2b| . v +---+ +---+----+ | . | | . | | | . +----+---+ +---+ | . +----+ . | +----| ....|LB2r|CLr|<..|VM2| | ..|SF1c|<... | |SF2c| +----+---+ +---+ -->| |----+ | | | +----|LB2f|-- +----+ +----+ |SF2d| | | +----+ The Load Balancers of the LB pairs may reside on different Compute Nodes. For example, LB1f may be hosted on one Compute Node and LB1r on another Compute Node. Alternatives ------------ None Data model impact ----------------- None REST API impact --------------- None Security impact --------------- None Notifications impact -------------------- None Other end user impact --------------------- None Performance Impact ------------------ None Other deployer impact --------------------- None. Developer impact ---------------- None. Implementation ============== Assignee(s) ----------- * Cathy Zhang (cathy.h.zhang@huawei.com) * Louis Fourie (louis.fourie@huawei.com) * Farhad Sunavala (farhad.sunavala@huawei.com) Work Items ---------- 1. Extend 'networking-sfc' OVS driver to support symmetric port chains. 2. Add unit tests. 3. Add tempest tests. 4. Update documentation. 
Dependencies ============ None Testing ======= Unit tests and function tests will be added. Documentation Impact ==================== None References ========== [1] https://review.openstack.org/#/c/308274/ networking-sfc-10.0.0/doc/source/contributor/sfc_non_transparent_sf.rst0000664000175000017500000001063213656750333026476 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ================================================= Non-Transparent Service Functions for Port Chains ================================================= URL of the launchpad blueprint: https://blueprints.launchpad.net/networking-sfc/+spec/sfc-non-transparent-sf This specification describes the support for non-transparent Service Functions in SFC Port Chains. Problem Description =================== Service Functions (SF) that do not support SFC encapsulation, such as NSH, require an SFC Proxy to re-classify a packet that is returned from the egress port of the SF. The SFC Proxy uses the N-tuple values of a packet header to re-classify a packet. The packet N-tuple consists of the following: * Source IP address * Destination IP address * Source TCP/UDP port * Destination TCP/UDP port * IP Protocol However, if the SF is non-transparent (it modifies a part of the N-tuple of a packet), then re-classification cannot be done correctly. See https://datatracker.ietf.org/doc/draft-song-sfc-legacy-sf-mapping/ Proposed Changes ================ This is an enhancement to the SFC proxy so that it is configured with the N-tuple translation rules of the SF. In other words how the SF translates the ingress Port N-tuple to the egress Port N-tuple of a packet: SF Ingress port N-tuple => SF Egress port N-Tuple The SFC Proxy can then adjust for the SF translation rules by using this N-tuple mapping. 
The SFC Proxy applies the N-tuple mapping to packets received from the egress port of the SF before the re-classification function. The Port Pair Group port-pair-group-parameter attribute allows service specific configuration to be applied to all Service Functions (Port Pairs) in a Port Pair Group. The port-pair-group-parameter will be enhanced to add an "n-tuple-map". This is an array of ingress-egress N-tuple value pairs: {ingress-N-tuple-value, egress-N-tuple-value} that are the same as the actual translation done by the SF itself. An example of the CLI format is shown below: n_tuple_map='source_ip_prefix_ingress=10.0.0.9& source_ip_prefix_egress=10.0.0.12& protocol_ingress=icmp& protocol_egress=tcp' The SFC Proxy in the OVS Integration Bridge will apply the "n-tuple-map" to the N-tuple of packets received from the egress port of the SF before they are passed to the re-classification function so that the re-classification rules are matched correctly. :: Compute Node +--------------------------------+ | VM | | +--------------------------+ | | | Non-transparent | | | | Service Function | | | +--------------------------+ | | P1 |^ P2 |. | | |. |. | | +------.------------.------+ | | | . SFC Proxy v | | | | . +-----------+ | | | | . |N-tuple Map| | | | | . +-----------+ | | | | . |Re-classify| | | | | . +-----------+ | | | | . . | | | | .>.... ...> | | | | | | | | OVS Integration | | | | Bridge | | | +--------------------------+ | | | +--------------------------------+ Alternatives ------------ None Data model impact ----------------- Add "n-tuple-map" to the Port Pair Group port-pair-group-parameter attribute. REST API impact --------------- Add "n-tuple-map": "N-TUPLE-MAP" to the port-pair-group-parameter. Security impact --------------- None Notifications impact -------------------- None Other end user impact --------------------- None Performance Impact ------------------ None Other deployer impact --------------------- None. 
Developer impact ---------------- None. Implementation ============== Assignee(s) ----------- * Cathy Zhang (cathy.h.zhang@huawei.com) * Louis Fourie (louis.fourie@huawei.com) Work Items ---------- 1. Extend API port-pair-group-parameter to support "n-tuple-map" attribute. 2. Extend 'networking-sfc' OVS driver to support "n-tuple-map" attribute. 3. Add unit and functional tests. 4. Update documentation. Dependencies ============ None Testing ======= Unit tests and functional tests will be added. Documentation Impact ==================== None References ========== None networking-sfc-10.0.0/doc/source/contributor/contribution.rst0000664000175000017500000000174513656750333024464 0ustar zuulzuul00000000000000.. Copyright 2015 Futurewei. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) ============ Contribution ============ .. include:: ../../../CONTRIBUTING.rst networking-sfc-10.0.0/doc/source/contributor/alembic_migration.rst0000664000175000017500000000742413656750333025412 0ustar zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) Alembic-migration ================= Using alembic-migration, required data modeling for networking-sfc is defined and applied to the database. Refer to `Neutron alembic migration process `_ for further details. The important operations are listed below. Checking migration ------------------ .. code-block:: console neutron-db-manage --subproject networking-sfc check_migration Running branches for networking-sfc ... start_networking_sfc (branchpoint) -> 48072cb59133 (contract) (head) -> 24fc7241aa5 (expand) OK Checking branch information --------------------------- .. code-block:: console neutron-db-manage --subproject networking-sfc branches Running branches for networking-sfc ... start_networking_sfc (branchpoint) -> 48072cb59133 (contract) (head) -> 24fc7241aa5 (expand) OK Checking migration history -------------------------- .. code-block:: console neutron-db-manage --subproject networking-sfc history Running history for networking-sfc ... 9768e6a66c9 -> 5a475fc853e6 (expand) (head), Defining OVS data-model 24fc7241aa5 -> 9768e6a66c9 (expand), Defining flow-classifier data-model start_networking_sfc -> 24fc7241aa5 (expand), Defining Port Chain data-model. start_networking_sfc -> 48072cb59133 (contract) (head), Initial Liberty no-op script. -> start_networking_sfc (branchpoint), start networking-sfc chain Applying changes ---------------- .. 
code-block:: console neutron-db-manage --subproject networking-sfc upgrade head INFO [alembic.runtime.migration] Context impl MySQLImpl. INFO [alembic.runtime.migration] Will assume non-transactional DDL. Running upgrade for networking-sfc ... INFO [alembic.runtime.migration] Context impl MySQLImpl. INFO [alembic.runtime.migration] Will assume non-transactional DDL. INFO [alembic.runtime.migration] Running upgrade -> start_networking_sfc, start networking-sfc chain INFO [alembic.runtime.migration] Running upgrade start_networking_sfc -> 48072cb59133, Initial Liberty no-op script. INFO [alembic.runtime.migration] Running upgrade start_networking_sfc -> 24fc7241aa5, Defining Port Chain data-model. INFO [alembic.runtime.migration] Running upgrade 24fc7241aa5 -> 9768e6a66c9, Defining flow-classifier data-model INFO [alembic.runtime.migration] Running upgrade 9768e6a66c9 -> 5a475fc853e6, Defining OVS data-model OK Checking current version ------------------------ .. code-block:: console neutron-db-manage --subproject networking-sfc current Running current for networking-sfc ... INFO [alembic.runtime.migration] Context impl MySQLImpl. INFO [alembic.runtime.migration] Will assume non-transactional DDL. 48072cb59133 (head) 5a475fc853e6 (head) OK networking-sfc-10.0.0/doc/source/contributor/api.rst0000664000175000017500000007733613656750333022527 0ustar zuulzuul00000000000000.. Copyright 2015 Futurewei. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) ========= API Model ========= Problem Description =================== Currently Neutron does not support service function chaining. To support service function chaining, Service VMs must be attached at points in the network and then traffic must be steered between these attachment points. Please refer to `Neutron Service Chain blue-print `_ and Bugs `[1] `_ `[2] `_ related to this specification for more information. Proposed Change =============== All Neutron network services and VMs are connected to a Neutron network via Neutron ports. This makes it possible to create a traffic steering model for service chaining that uses only Neutron ports. This traffic steering model has no notion of the actual services attached to these Neutron ports. The service VM hosting the service functions is instantiated and configured, then VNICs are added to the VM and then these VNICs are attached to the network by Neutron ports. Once the service function is attached to Neutron ports, the ports may be included in a "port chain" to allow the service function to provide treatment to the user's traffic. A Port Chain (Service Function Path) consists of: * a set of Neutron ports, to define the sequence of service functions * a set of flow classifiers, to specify the classified traffic flows to enter the chain If a service function has a pair of ports, the first port in the port-pair is the ingress port of the service function, and the second port is the egress port of the service function. If a service function has one bidirectional port, then both ports in the port-pair have the same value. A Port Chain is a directional service chain. The first port of the first port-pair is the head of the service chain. 
The second port of the last port-pair is the tail of the service chain. A bidirectional service chain would be composed of two unidirectional Port Chains. For example, [{'p1': 'p2'}, {'p3': 'p4'}, {'p5': 'p6'}] represents:: +------+ +------+ +------+ | SF1 | | SF2 | | SF3 | +------+ +------+ +------+ p1| |p2 p3| |p4 p5| |p6 | | | | | | ->---+ +---------+ +----------+ +----> where p1 is the head of the Port Chain and p6 is the tail of the Port Chain, and SF1 has ports p1 and p2, SF2 has ports p3 and p4, and SF3 has ports p5 and p6. In order to create a chain, the user needs to have the actual port objects. The work flow would typically be: 1. create the ports 2. create the chain 3. boot the vm's passing the ports as nic's parameters The sequence of 2. and 3. can be switched. A SF's Neutron port may be associated with more than one Port Chain to allow a service function to be shared by multiple chains. If there is more than one service function instance of a specific type available to meet the user's service requirement, their Neutron ports are included in the port chain as a sub-list. For example, if {p3, p4}, {p7, p8} are the port-pairs of two FW instances, they both may be included in a port chain for load distribution as shown below:: [{'p1': 'p2'}, [{'p3': 'p4'},{'p7': 'p8'}], {'p5': 'p6'}] Flow classifiers are used to select the traffic that can access the chain. Traffic that matches any flow classifier will be directed to the first port in the chain. The flow classifier will be a generic independent module and may be used by other projects like FW, QOS, etc. A flow classifier cannot be part of two different port-chains otherwise ambiguity will arise as to which chain path that flow's packets should go. A check will be made to ensure no ambiguity. But multiple flow classifiers can be associated with a port chain since multiple different types of flows can request the same service treatment path. 
CLI Commands ~~~~~~~~~~~~ Syntax:: openstack sfc port pair create [-h] [--description ] --ingress --egress [--service-function-parameters ] PORT-PAIR-NAME openstack sfc port pair group create [-h] [--description ] --port-pair [--port-pair-group-parameters ] PORT-PAIR-GROUP-NAME openstack sfc flow classifier create [-h] [--description ] [--protocol ] [--ethertype ] [--source-port :] [--destination-port :] [--source-ip-prefix ] [--destination-ip-prefix ] [--logical-source-port ] [--logical-destination-port ] [--l7-parameters ] FLOW-CLASSIFIER-NAME openstack sfc port chain create [-h] [--description ] --port-pair-group [--flow-classifier ] [--chain-parameters ] PORT-CHAIN-NAME openstack sfc port chain create ------------------------------- The ``sfc port chain create`` command returns the ID of the Port Chain. Each ``--port-pair-group`` option specifies a type of SF. If a chain consists of a sequence of different types of SFs, then the chain will have multiple "port-pair-group"s. There must be at least one "port-pair-group" in the Port Chain. The ``--flow-classifier`` option may be repeated to associate multiple flow classifiers with a port chain, with each classifier identifying a flow. If the flow-classifier is not specified, then no traffic will be steered through the chain. One chain parameter option is currently defined. More parameter options can be added in future extensions to accommodate new requirements. The ``correlation`` parameter is used to specify the type of chain correlation mechanism. This parameter allows different correlation mechanisms to be selected. The chain correlation concept is equivalent to SFC Encapsulation, as defined in RFC 7665. The default is "mpls", but "nsh" is also supported. The ``sfc port chain create`` command returns the ID of a Port Chain. 
A port chain can be created, read, updated and deleted, and when a chain is created/read/updated/deleted, the options that are involved would be based on the CRUD in the "Port Chain" resource table below. openstack sfc port pair group create ------------------------------------ Inside each "port-pair-group", there could be one or more port-pairs. Multiple port-pairs may be included in a "port-pair-group" to allow the specification of a set of functionally equivalent SFs that can be used for load distribution, i.e., the ``--port-pair`` option may be repeated for multiple port-pairs of functionally equivalent SFs. The ``sfc port pair group create`` command returns the ID of a Port Pair group. openstack sfc port pair create ------------------------------ A Port Pair represents a service function instance. The ingress port and the egress port of the service function may be specified. If a service function has one bidirectional port, the ingress port has the same value as the egress port. The ``--service-function-parameters`` option allows the passing of SF specific parameter information to the data path. These include: * The ``correlation`` parameter is used to specify the type of chain correlation mechanism supported by a specific SF. This is needed by the data plane switch to determine how to associate a packet with a chain. This will be set to "none" for now since there is no correlation mechanism supported by the SF. In the future, it can be extended to include "mpls", "nsh", etc.. If this parameter is not specified, it will default to "none". * The ``weight`` parameter is used to specify the weight for each SF for load distribution in a port pair group. This represents a percentage of the traffic to be sent to each SF. The ``sfc port pair create`` command returns the ID of a Port Pair. openstack sfc flow classifier create ------------------------------------ A combination of the "source" options defines the source of the flow. 
A combination of the "destination" options defines the destination of the flow. The l7_parameter is a place-holder that may be used to support flow classification using L7 fields, such as URL. If an option is not specified, it will default to wildcard value except for ethertype which defaults to 'IPv4', for logical-source-port and logical-destination-port which defaults to none. The ``sfc flow classifier create`` command returns the ID of a flow classifier. Data Model Impact ~~~~~~~~~~~~~~~~~ Data model:: +-------+ +----------+ +------------+ | Port |--------| Port Pair|--------| Port Pairs | | Chain |* *| Groups | 1 *| | +-------+ +----------+ +------------+ |1 | |* +--------------+ | Flow | | Classifiers | +--------------+ New objects: Port Chain * id - Port chain ID. * project_id - Tenant ID. * name - Readable name. * description - Readable description. * port_pair_groups - List of port-pair-group IDs. * flow_classifiers - List of flow-classifier IDs. * chain_parameters - Dict. of chain parameters. * chain_id - Data-plane chain path ID. Port Pair Group * id - Port pair group ID. * project_id - Tenant ID. * name - Readable name. * description - Readable description. * port_pairs - List of service function (Neutron) port-pairs. * port_pair_group_parameters - Dict. of port pair group parameters. Port Pair * id - Port pair ID. * project_id - Tenant ID. * name - Readable name. * description - Readable description. * ingress - Ingress port. * egress - Egress port. * service_function_parameters - Dict. of service function parameters Flow Classifier * id - Flow classifier ID. * project_id - Tenant ID. * name - Readable name. * description - Readable description. * ethertype - Ethertype ('IPv4'/'IPv6'). * protocol - IP protocol. * source_port_range_min - Minimum source protocol port. * source_port_range_max - Maximum source protocol port. * destination_port_range_min - Minimum destination protocol port. * destination_port_range_max - Maximum destination protocol port. 
* source_ip_prefix - Source IP address or prefix. * destination_ip_prefix - Destination IP address or prefix. * logical_source_port - Neutron source port. * logical_destination_port - Neutron destination port. * l7_parameters - Dictionary of L7 parameters. REST API ~~~~~~~~ Port Chain Operations: +------------+---------------------------+------------------------------------------+ |Operation |URL |Description | +============+===========================+==========================================+ |POST |/sfc/port_chains |Create a Port Chain | +------------+---------------------------+------------------------------------------+ |PUT |/sfc/port_chains/{chain_id}|Update a specific Port Chain | +------------+---------------------------+------------------------------------------+ |DELETE |/sfc/port_chains/{chain_id}|Delete a specific Port Chain | +------------+---------------------------+------------------------------------------+ |GET |/sfc/port_chains |List all Port Chains for specified tenant | +------------+---------------------------+------------------------------------------+ |GET |/sfc/port_chains/{chain_id}|Show information for a specific Port Chain| +------------+---------------------------+------------------------------------------+ Port Pair Group Operations: +------------+--------------------------------+-----------------------------------------------+ |Operation |URL |Description | +============+================================+===============================================+ |POST |/sfc/port_pair_groups |Create a Port Pair Group | +------------+--------------------------------+-----------------------------------------------+ |PUT |/sfc/port_pair_groups/{group_id}|Update a specific Port Pair Group | +------------+--------------------------------+-----------------------------------------------+ |DELETE |/sfc/port_pair_groups/{group_id}|Delete a specific Port Pair Group | 
+------------+--------------------------------+-----------------------------------------------+ |GET |/sfc/port_pair_groups |List all Port Pair Groups for specified tenant | +------------+--------------------------------+-----------------------------------------------+ |GET |/sfc/port_pair_groups/{group_id}|Show information for a specific Port Pair | +------------+--------------------------------+-----------------------------------------------+ Port Pair Operations: +------------+-------------------------+------------------------------------------+ |Operation |URL |Description | +============+=========================+==========================================+ |POST |/sfc/port_pairs |Create a Port Pair | +------------+-------------------------+------------------------------------------+ |PUT |/sfc/port_pairs/{pair_id}|Update a specific Port Pair | +------------+-------------------------+------------------------------------------+ |DELETE |/sfc/port_pairs/{pair_id}|Delete a specific Port Pair | +------------+-------------------------+------------------------------------------+ |GET |/sfc/port_pairs |List all Port Pairs for specified tenant | +------------+-------------------------+------------------------------------------+ |GET |/sfc/port_pairs/{pair_id}|Show information for a specific Port Pair | +------------+-------------------------+------------------------------------------+ Flow Classifier Operations: +------------+-------------------------------+------------------------------------------------+ |Operation |URL |Description | +============+===============================+================================================+ |POST |/sfc/flow_classifiers |Create a Flow-classifier | +------------+-------------------------------+------------------------------------------------+ |PUT |/sfc/flow_classifiers/{flow_id}|Update a specific Flow-classifier | +------------+-------------------------------+------------------------------------------------+ |DELETE 
|/sfc/flow_classifiers/{flow_id}|Delete a specific Flow-classifier | +------------+-------------------------------+------------------------------------------------+ |GET |/sfc/flow_classifiers |List all Flow-classifiers for specified tenant | +------------+-------------------------------+------------------------------------------------+ |GET |/sfc/flow_classifiers/{flow_id}|Show information for a specific Flow-classifier | +------------+-------------------------------+------------------------------------------------+ REST API Impact ~~~~~~~~~~~~~~~ The following new resources will be created as a result of the API handling. Port Chain resource: +----------------+----------+--------+---------+----+-------------------------+ |Attribute |Type |Access |Default |CRUD|Description | |Name | | |Value | | | +================+==========+========+=========+====+=========================+ |id |uuid |RO, all |generated|R |Port Chain ID. | +----------------+----------+--------+---------+----+-------------------------+ |project_id |uuid |RO, all |from auth|CR |Tenant ID. | | | | |token | | | +----------------+----------+--------+---------+----+-------------------------+ |name |string |RW, all |'' |CRU |Port Chain name. | +----------------+----------+--------+---------+----+-------------------------+ |description |string |RW, all |'' |CRU |Port Chain description. | +----------------+----------+--------+---------+----+-------------------------+ |port_pair_groups|list(uuid)|RW, all |N/A |CRU |List of port-pair-groups.| +----------------+----------+--------+---------+----+-------------------------+ |flow_classifiers|list(uuid)|RW, all |[] |CRU |List of flow-classifiers.| +----------------+----------+--------+---------+----+-------------------------+ |chain_parameters|dict |RW, all |mpls |CR |Dict. 
of parameters: | | | | | | |'correlation':String | +----------------+----------+--------+---------+----+-------------------------+ |chain_id |integer |RW, all |Any |CR |Data-plane Chain Path ID.| +----------------+----------+--------+---------+----+-------------------------+ The data-plane chain path ID is normally generated by the data-plane implementation. However, an application may optionally generate its own data-plane chain path ID and apply it to the Port Chain using the chain_id attribute. Port Pair Group resource: +----------------+----------+--------+---------+----+-------------------------+ |Attribute |Type |Access |Default |CRUD|Description | |Name | | |Value | | | +================+==========+========+=========+====+=========================+ |id |uuid |RO, all |generated|R |Port pair group ID. | +----------------+----------+--------+---------+----+-------------------------+ |project_id |uuid |RO, all |from auth|CR |Tenant ID. | | | | |token | | | +----------------+----------+--------+---------+----+-------------------------+ |name |string |RW, all |'' |CRU |Port pair group name. | +----------------+----------+--------+---------+----+-------------------------+ |description |string |RW, all |'' |CRU |Port pair group | | | | | | |description. | +----------------+----------+--------+---------+----+-------------------------+ |port_pairs |list |RW, all |N/A |CRU |List of port-pairs. | +----------------+----------+--------+---------+----+-------------------------+ |port_pair_group |dict |RW, all |'' |CR |Dict. 
of parameters: | |_parameters | | | | |'lb_fields':String | | | | | | |'service_type':String | +----------------+----------+--------+---------+----+-------------------------+ Port Pair resource: +---------------------------+--------+---------+---------+----+----------------------+ |Attribute Name |Type |Access |Default |CRUD|Description | +===========================+========+=========+=========+====+======================+ |id |uuid |RO, all |generated|R |Port pair ID. | +---------------------------+--------+---------+---------+----+----------------------+ |project_id |uuid |RO, all |from auth|CR |Tenant ID. | | | | |token | | | +---------------------------+--------+---------+---------+----+----------------------+ |name |string |RW, all |'' |CRU |Port pair name. | +---------------------------+--------+---------+---------+----+----------------------+ |description |string |RW, all |'' |CRU |Port pair description.| +---------------------------+--------+---------+---------+----+----------------------+ |ingress |uuid |RW, all |N/A |CR |Ingress port ID. | +---------------------------+--------+---------+---------+----+----------------------+ |egress |uuid |RW, all |N/A |CR |Egress port ID. | +---------------------------+--------+---------+---------+----+----------------------+ |service_function_parameters|dict |RW, all |None |CR |Dict. of parameters: | | | | | | |'correlation':String | | | | | | |'weight':Integer | +---------------------------+--------+---------+---------+----+----------------------+ Flow Classifier resource: +--------------------------+--------+---------+---------+----+-----------------------+ |Attribute Name |Type |Access |Default |CRUD|Description | | | | |Value | | | +==========================+========+=========+=========+====+=======================+ |id |uuid |RO, all |generated|R |Flow-classifier ID. | +--------------------------+--------+---------+---------+----+-----------------------+ |project_id |uuid |RO, all |from auth|CR |Tenant ID. 
| | | | |token | | | +--------------------------+--------+---------+---------+----+-----------------------+ |name |string |RW, all |'' |CRU |Flow-classifier name. | +--------------------------+--------+---------+---------+----+-----------------------+ |description |string |RW, all |'' |CRU |Flow-classifier | | | | | | |description. | +--------------------------+--------+---------+---------+----+-----------------------+ |ethertype |string |RW, all |'IPv4' |CR |L2 ethertype. Can be | | | | | | |'IPv4' or 'IPv6' only. | +--------------------------+--------+---------+---------+----+-----------------------+ |protocol |string |RW, all |Any |CR |IP protocol name. | +--------------------------+--------+---------+---------+----+-----------------------+ |source_port_range_min |integer |RW, all |Any |CR |Minimum source | | | | | | |protocol port. | +--------------------------+--------+---------+---------+----+-----------------------+ |source_port_range_max |integer |RW, all |Any |CR |Maximum source | | | | | | |protocol port. | +--------------------------+--------+---------+---------+----+-----------------------+ |destination_port_range_min|integer |RW, all |Any |CR |Minimum destination | | | | | | |protocol port. | +--------------------------+--------+---------+---------+----+-----------------------+ |destination_port_range_max|integer |RW, all |Any |CR |Maximum destination | | | | | | |protocol port. | +--------------------------+--------+---------+---------+----+-----------------------+ |source_ip_prefix |CIDR |RW, all |Any |CR |Source IPv4 or IPv6 | | | | | | |prefix. | +--------------------------+--------+---------+---------+----+-----------------------+ |destination_ip_prefix |CIDR |RW, all |Any |CR |Destination IPv4 or | | | | | | |IPv6 prefix. | +--------------------------+--------+---------+---------+----+-----------------------+ |logical_source_port |uuid |RW, all |None |CR |Neutron source port. 
| +--------------------------+--------+---------+---------+----+-----------------------+ |logical_destination_port |uuid |RW, all |None |CR |Neutron destination | | | | | | |port. | +--------------------------+--------+---------+---------+----+-----------------------+ |l7_parameters |dict |RW, all |Any |CR |Dict. of L7 parameters.| +--------------------------+--------+---------+---------+----+-----------------------+ Json Port-pair create request example:: {"port_pair": {"name": "SF1", "project_id": "d382007aa9904763a801f68ecf065cf5", "description": "Firewall SF instance", "ingress": "dace4513-24fc-4fae-af4b-321c5e2eb3d1", "egress": "aef3478a-4a56-2a6e-cd3a-9dee4e2ec345", } } {"port_pair": {"name": "SF2", "project_id": "d382007aa9904763a801f68ecf065cf5", "description": "Loadbalancer SF instance", "ingress": "797f899e-73d4-11e5-b392-2c27d72acb4c", "egress": "797f899e-73d4-11e5-b392-2c27d72acb4c", } } Json Port-pair create response example:: {"port_pair": {"name": "SF1", "project_id": "d382007aa9904763a801f68ecf065cf5", "description": "Firewall SF instance", "ingress": "dace4513-24fc-4fae-af4b-321c5e2eb3d1", "egress": "aef3478a-4a56-2a6e-cd3a-9dee4e2ec345", "id": "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae", } } {"port_pair": {"name": "SF2", "project_id": "d382007aa9904763a801f68ecf065cf5", "description": "Loadbalancer SF instance", "ingress": "797f899e-73d4-11e5-b392-2c27d72acb4c", "egress": "797f899e-73d4-11e5-b392-2c27d72acb4c", "id": "d11e9190-73d4-11e5-b392-2c27d72acb4c" } } Json Port Pair Group create request example:: {"port_pair_group": {"name": "Firewall_PortPairGroup", "project_id": "d382007aa9904763a801f68ecf065cf5", "description": "Grouping Firewall SF instances", "port_pairs": [ "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" ], "port_pair_group_parameters": [ "lb_fields: ip_src" ] } } {"port_pair_group": {"name": "Loadbalancer_PortPairGroup", "project_id": "d382007aa9904763a801f68ecf065cf5", "description": "Grouping Loadbalancer SF instances", "port_pairs": [ 
"d11e9190-73d4-11e5-b392-2c27d72acb4c" ], "port_pair_group_parameters": [ "lb_fields: ip_src" ] } } Json Port Pair Group create response example:: {"port_pair_group": {"name": "Firewall_PortPairGroup", "project_id": "d382007aa9904763a801f68ecf065cf5", "description": "Grouping Firewall SF instances", "port_pairs": [ "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" ], "port_pair_group_parameters": [ "lb_fields: ip_src" ], "id": "4512d643-24fc-4fae-af4b-321c5e2eb3d1", } } {"port_pair_group": {"name": "Loadbalancer_PortPairGroup", "project_id": "d382007aa9904763a801f68ecf065cf5", "description": "Grouping Loadbalancer SF instances", "port_pairs": [ "d11e9190-73d4-11e5-b392-2c27d72acb4c" ], "port_pair_group_parameters": [ "lb_fields: ip_src" ], "id": "4a634d49-76dc-4fae-af4b-321c5e23d651", } } Json Flow Classifier create request example:: {"flow_classifier": {"name": "FC1", "project_id": "1814726e2d22407b8ca76db5e567dcf1", "description": "Flow rule for classifying TCP traffic", "protocol": "TCP", "source_port_range_min": 22, "source_port_range_max": 4000, "destination_port_range_min": 80, "destination_port_range_max": 80, "source_ip_prefix": null, "destination_ip_prefix": "22.12.34.45" } } {"flow_classifier": {"name": "FC2", "project_id": "1814726e2d22407b8ca76db5e567dcf1", "description": "Flow rule for classifying UDP traffic", "protocol": "UDP", "source_port_range_min": 22, "source_port_range_max": 22, "destination_port_range_min": 80, "destination_port_range_max": 80, "source_ip_prefix": null, "destination_ip_prefix": "22.12.34.45" } } Json Flow Classifier create response example:: {"flow_classifier": {"name": "FC1", "project_id": "1814726e2d22407b8ca76db5e567dcf1", "description": "Flow rule for classifying TCP traffic", "protocol": "TCP", "source_port_range_min": 22, "source_port_range_max": 4000, "destination_port_range_min": 80, "destination_port_range_max": 80, "source_ip_prefix": null , "destination_ip_prefix": "22.12.34.45", "id": "4a334cd4-fe9c-4fae-af4b-321c5e2eb051" } } 
{"flow_classifier": {"name": "FC2", "project_id": "1814726e2d22407b8ca76db5e567dcf1", "description": "Flow rule for classifying UDP traffic", "protocol": "UDP", "source_port_range_min": 22, "source_port_range_max": 22, "destination_port_range_min": 80, "destination_port_range_max": 80, "source_ip_prefix": null , "destination_ip_prefix": "22.12.34.45", "id": "105a4b0a-73d6-11e5-b392-2c27d72acb4c" } } Json Port Chain create request example:: {"port_chain": {"name": "PC1", "project_id": "d382007aa9904763a801f68ecf065cf5", "description": "Steering TCP and UDP traffic first to Firewall and then to Loadbalancer", "flow_classifiers": [ "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "105a4b0a-73d6-11e5-b392-2c27d72acb4c" ], "port_pair_groups": [ "4512d643-24fc-4fae-af4b-321c5e2eb3d1", "4a634d49-76dc-4fae-af4b-321c5e23d651" ], "chain_id": "10034" } } Json Port Chain create response example:: {"port_chain": {"name": "PC1", "project_id": "d382007aa9904763a801f68ecf065cf5", "description": "Steering TCP and UDP traffic first to Firewall and then to Loadbalancer", "flow_classifiers": [ "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "105a4b0a-73d6-11e5-b392-2c27d72acb4c" ], "port_pair_groups": [ "4512d643-24fc-4fae-af4b-321c5e2eb3d1", "4a634d49-76dc-4fae-af4b-321c5e23d651" ], "chain_id": "10034", "id": "1278dcd4-459f-62ed-754b-87fc5e4a6751" } } Implementation ============== Assignee(s) ~~~~~~~~~~~ Authors of the Specification and Primary contributors: * Cathy Zhang (cathy.h.zhang@huawei.com) * Louis Fourie (louis.fourie@huawei.com) Other contributors: * Vikram Choudhary (vikram.choudhary@huawei.com) * Swaminathan Vasudevan (swaminathan.vasudevan@hp.com) * Yuji Azama (yuj-azama@rc.jp.nec.com) * Mohan Kumar (nmohankumar1011@gmail.com) * Ramanjaneya (ramanjieee@gmail.com) * Stephen Wong (stephen.kf.wong@gmail.com) * Nicolas Bouthors (Nicolas.BOUTHORS@qosmos.com) * Akihiro Motoki * Paul Carver networking-sfc-10.0.0/doc/source/configuration/0000775000175000017500000000000013656750461021503 5ustar 
zuulzuul00000000000000networking-sfc-10.0.0/doc/source/configuration/policy-sample.rst0000664000175000017500000000106413656750333025012 0ustar zuulzuul00000000000000================================= Sample networking-sfc Policy File ================================= The following is a sample networking-sfc policy file for adaptation and use. The sample policy can also be viewed in :download:`file form `. .. important:: The sample policy file is auto-generated from networking-sfc when this documentation is built. You must ensure your version of networking-sfc matches the version of this documentation. .. literalinclude:: /_static/networking-sfc.policy.yaml.sample networking-sfc-10.0.0/doc/source/configuration/policy.rst0000664000175000017500000000045613656750333023537 0ustar zuulzuul00000000000000======================= networking-sfc policies ======================= The following is an overview of all available policies in networking-sfc. For a sample configuration file, refer to :doc:`/configuration/policy-sample`. .. show-policy:: :config-file: etc/oslo-policy-generator/policy.conf networking-sfc-10.0.0/doc/source/configuration/samples/0000775000175000017500000000000013656750461023147 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/source/configuration/samples/networking-sfc.rst0000664000175000017500000000043713656750333026643 0ustar zuulzuul00000000000000========================== Sample networking-sfc.conf ========================== This sample configuration can also be viewed in `the raw format <../../_static/config_samples/networking-sfc.conf.sample>`_. .. literalinclude:: ../../_static/config_samples/networking-sfc.conf.sample networking-sfc-10.0.0/doc/source/configuration/index.rst0000664000175000017500000000132513656750333023343 0ustar zuulzuul00000000000000=================== Configuration Guide =================== Configuration ------------- This section provides a list of all possible options for each configuration file. 
networking-sfc uses the following configuration file. .. toctree:: :maxdepth: 1 networking-sfc The following is a sample configuration file for networking-sfc. It is generated from code and reflect the current state of code in the networking-sfc repository. .. toctree:: :maxdepth: 1 samples/networking-sfc Policy ------ networking-sfc, like most OpenStack projects, uses a policy language to restrict permissions on REST API actions. .. toctree:: :maxdepth: 1 Policy Reference Sample Policy File networking-sfc-10.0.0/doc/source/configuration/networking-sfc.rst0000664000175000017500000000021613656750333025172 0ustar zuulzuul00000000000000=================== networking-sfc.conf =================== .. show-options:: :config-file: etc/oslo-config-generator/networking-sfc.conf networking-sfc-10.0.0/doc/api_samples/0000775000175000017500000000000013656750461017631 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/api_samples/sfc-classifiers/0000775000175000017500000000000013656750461022711 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/api_samples/sfc-classifiers/flow-classifier-update-resp.json0000664000175000017500000000075013656750333031124 0ustar zuulzuul00000000000000{ "flow_classifier": { "id": "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "name": "FC1", "tenant_id": "1814726e2d22407b8ca76db5e567dcf1", "description": "Flow rule for classifying TCP traffic", "protocol": "TCP", "source_port_range_min": 100, "source_port_range_max": 4000, "destination_port_range_min": 80, "destination_port_range_max": 80, "source_ip_prefix": null, "destination_ip_prefix": "22.12.34.65" } } networking-sfc-10.0.0/doc/api_samples/sfc-classifiers/flow-classifier-create-resp.json0000664000175000017500000000074713656750333031113 0ustar zuulzuul00000000000000{ "flow_classifier": { "name": "FC1", "tenant_id": "1814726e2d22407b8ca76db5e567dcf1", "description": "Flow rule for classifying TCP traffic", "protocol": "TCP", "source_port_range_min": 22, "source_port_range_max": 4000, 
"destination_port_range_min": 80, "destination_port_range_max": 80, "source_ip_prefix": null, "destination_ip_prefix": "22.12.34.45", "id": "4a334cd4-fe9c-4fae-af4b-321c5e2eb051" } } networking-sfc-10.0.0/doc/api_samples/sfc-classifiers/flow-classifier-list-resp.json0000664000175000017500000000103513656750333030612 0ustar zuulzuul00000000000000{ "flow_classifiers": [ { "id": "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "name": "FC1", "tenant_id": "1814726e2d22407b8ca76db5e567dcf1", "description": "Flow rule for classifying TCP traffic", "protocol": "TCP", "source_port_range_min": 100, "source_port_range_max": 4000, "destination_port_range_min": 80, "destination_port_range_max": 80, "source_ip_prefix": null, "destination_ip_prefix": "22.12.34.65" } ] } networking-sfc-10.0.0/doc/api_samples/sfc-classifiers/flow-classifier-update-req.json0000664000175000017500000000035613656750333030744 0ustar zuulzuul00000000000000{ "flow_classifier": { "id": "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "name": "FC1", "tenant_id": "1814726e2d22407b8ca76db5e567dcf1", "description": "Flow rule for classifying TCP traffic" } } networking-sfc-10.0.0/doc/api_samples/sfc-classifiers/flow-classifier-get-resp.json0000664000175000017500000000075013656750333030421 0ustar zuulzuul00000000000000{ "flow_classifier": { "id": "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "name": "FC1", "tenant_id": "1814726e2d22407b8ca76db5e567dcf1", "description": "Flow rule for classifying TCP traffic", "protocol": "TCP", "source_port_range_min": 100, "source_port_range_max": 4000, "destination_port_range_min": 80, "destination_port_range_max": 80, "source_ip_prefix": null, "destination_ip_prefix": "22.12.34.65" } } networking-sfc-10.0.0/doc/api_samples/sfc-classifiers/flow-classifier-create-req.json0000664000175000017500000000066013656750333030723 0ustar zuulzuul00000000000000{ "flow_classifier": { "name": "FC1", "tenant_id": "1814726e2d22407b8ca76db5e567dcf1", "description": "Flow rule for classifying TCP traffic", "protocol": 
"TCP", "source_port_range_min": 22, "source_port_range_max": 4000, "destination_port_range_min": 80, "destination_port_range_max": 80, "source_ip_prefix": null, "destination_ip_prefix": "22.12.34.45" } } networking-sfc-10.0.0/doc/api_samples/sfc-chains/0000775000175000017500000000000013656750461021647 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/api_samples/sfc-chains/port-chain-create-req.json0000664000175000017500000000072013656750333026631 0ustar zuulzuul00000000000000{ "port_chain": { "name": "PC1", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Port chain with Firewall and IPS SFs", "flow_classifiers": [ "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "105a4b0a-73d6-11e5-b392-2c27d72acb4c" ], "port_pair_groups": [ "4512d643-24fc-4fae-af4b-321c5e2eb3d1", "4a634d49-76dc-4fae-af4b-321c5e23d651" ] } } networking-sfc-10.0.0/doc/api_samples/sfc-chains/port-chain-update-resp.json0000664000175000017500000000100613656750333027030 0ustar zuulzuul00000000000000{ "port_chain": { "name": "PC1", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Port chain with Firewall and IPS SFs", "flow_classifiers": [ "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "105a4b0a-73d6-11e5-b392-2c27d72acb4c" ], "port_pair_groups": [ "4512d643-24fc-4fae-af4b-321c5e2eb3d1", "4a634d49-76dc-4fae-af4b-321c5e23d651" ], "id": "1278dcd4-459f-62ed-754b-87fc5e4a6751" } } networking-sfc-10.0.0/doc/api_samples/sfc-chains/port-chain-get-resp.json0000664000175000017500000000100613656750333026325 0ustar zuulzuul00000000000000{ "port_chain": { "id": "1278dcd4-459f-62ed-754b-87fc5e4a6751", "name": "PC1", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Port chain with Firewall and IPS SFs", "flow_classifiers": [ "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "105a4b0a-73d6-11e5-b392-2c27d72acb4c" ], "port_pair_groups": [ "4512d643-24fc-4fae-af4b-321c5e2eb3d1", "4a634d49-76dc-4fae-af4b-321c5e23d651" ] } } 
networking-sfc-10.0.0/doc/api_samples/sfc-chains/port-chain-create-resp.json0000664000175000017500000000100613656750333027011 0ustar zuulzuul00000000000000{ "port_chain": { "name": "PC1", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Port chain with Firewall and IPS SFs", "flow_classifiers": [ "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "105a4b0a-73d6-11e5-b392-2c27d72acb4c" ], "port_pair_groups": [ "4512d643-24fc-4fae-af4b-321c5e2eb3d1", "4a634d49-76dc-4fae-af4b-321c5e23d651" ], "id": "1278dcd4-459f-62ed-754b-87fc5e4a6751" } } networking-sfc-10.0.0/doc/api_samples/sfc-chains/port-chain-update-req.json0000664000175000017500000000100613656750333026646 0ustar zuulzuul00000000000000{ "port_chain": { "id": "1278dcd4-459f-62ed-754b-87fc5e4a6751", "name": "PC1", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Port chain with Firewall and IPS SFs", "flow_classifiers": [ "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "105a4b0a-73d6-11e5-b392-2c27d72acb4c" ], "port_pair_groups": [ "4512d643-24fc-4fae-af4b-321c5e2eb3d1", "4a634d49-76dc-4fae-af4b-321c5e23d651" ] } } networking-sfc-10.0.0/doc/api_samples/sfc-chains/port-chain-list-resp.json0000664000175000017500000000111313656750333026520 0ustar zuulzuul00000000000000{ "port_chains": [ { "name": "PC1", "id": "1278dcd4-459f-62ed-754b-87fc5e4a6751", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Port chain with Firewall and IPS SFs", "flow_classifiers": [ "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "105a4b0a-73d6-11e5-b392-2c27d72acb4c" ], "port_pair_groups": [ "4512d643-24fc-4fae-af4b-321c5e2eb3d1", "4a634d49-76dc-4fae-af4b-321c5e23d651" ] } ] } networking-sfc-10.0.0/doc/api_samples/sfc-port-pairs/0000775000175000017500000000000013656750461022502 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/api_samples/sfc-port-pairs/port-pair-update-req.json0000664000175000017500000000023613656750333027356 0ustar zuulzuul00000000000000{ "port_pair": { "name": "SF1", "tenant_id": 
"d382007aa9904763a801f68ecf065cf5", "description": "Firewall SF instance" } } networking-sfc-10.0.0/doc/api_samples/sfc-port-pairs/port-pair-get-resp.json0000664000175000017500000000051513656750333027035 0ustar zuulzuul00000000000000{ "port_pair": { "name": "SF1", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Firewall SF instance", "ingress": "dace4513-24fc-4fae-af4b-321c5e2eb3d1", "egress": "aef3478a-4a56-2a6e-cd3a-9dee4e2ec345", "id": "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" } } networking-sfc-10.0.0/doc/api_samples/sfc-port-pairs/port-pair-create-resp.json0000664000175000017500000000051513656750333027521 0ustar zuulzuul00000000000000{ "port_pair": { "name": "SF1", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Firewall SF instance", "ingress": "dace4513-24fc-4fae-af4b-321c5e2eb3d1", "egress": "aef3478a-4a56-2a6e-cd3a-9dee4e2ec345", "id": "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" } } networking-sfc-10.0.0/doc/api_samples/sfc-port-pairs/port-pair-update-resp.json0000664000175000017500000000051513656750333027540 0ustar zuulzuul00000000000000{ "port_pair": { "name": "SF1", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Firewall SF instance", "ingress": "dace4513-24fc-4fae-af4b-321c5e2eb3d1", "egress": "aef3478a-4a56-2a6e-cd3a-9dee4e2ec345", "id": "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" } } networking-sfc-10.0.0/doc/api_samples/sfc-port-pairs/port-pair-list-resp.json0000664000175000017500000000057313656750333027235 0ustar zuulzuul00000000000000{ "port_pairs": [ { "name": "SF1", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Firewall SF instance", "ingress": "dace4513-24fc-4fae-af4b-321c5e2eb3d1", "egress": "aef3478a-4a56-2a6e-cd3a-9dee4e2ec345", "id": "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" } ] } networking-sfc-10.0.0/doc/api_samples/sfc-port-pairs/port-pair-create-req.json0000664000175000017500000000042513656750333027337 0ustar zuulzuul00000000000000{ "port_pair": { "name": "SF1", "tenant_id": 
"d382007aa9904763a801f68ecf065cf5", "description": "Firewall SF instance", "ingress": "dace4513-24fc-4fae-af4b-321c5e2eb3d1", "egress": "aef3478a-4a56-2a6e-cd3a-9dee4e2ec345" } } networking-sfc-10.0.0/doc/api_samples/sfc-port-pair-groups/0000775000175000017500000000000013656750461023634 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/api_samples/sfc-port-pair-groups/port-pair-group-get-resp.json0000664000175000017500000000052113656750333031316 0ustar zuulzuul00000000000000{ "port_pair_group": { "name": "Firewall_PortPairGroup", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Group of Firewall SF instances", "port_pairs": [ "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" ], "id": "4512d643-24fc-4fae-af4b-321c5e2eb3d1" } } networking-sfc-10.0.0/doc/api_samples/sfc-port-pair-groups/port-pair-group-create-resp.json0000664000175000017500000000052113656750333032002 0ustar zuulzuul00000000000000{ "port_pair_group": { "name": "Firewall_PortPairGroup", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Group of Firewall SF instances", "port_pairs": [ "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" ], "id": "4512d643-24fc-4fae-af4b-321c5e2eb3d1" } } networking-sfc-10.0.0/doc/api_samples/sfc-port-pair-groups/port-pair-group-update-resp.json0000664000175000017500000000052113656750333032021 0ustar zuulzuul00000000000000{ "port_pair_group": { "name": "Firewall_PortPairGroup", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Group of Firewall SF instances", "port_pairs": [ "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" ], "id": "4512d643-24fc-4fae-af4b-321c5e2eb3d1" } } networking-sfc-10.0.0/doc/api_samples/sfc-port-pair-groups/port-pair-group-update-req.json0000664000175000017500000000043213656750333031640 0ustar zuulzuul00000000000000{ "port_pair_group": { "name": "Firewall_PortPairGroup", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Group of Firewall SF instances", "port_pairs": [ "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" ] } } 
networking-sfc-10.0.0/doc/api_samples/sfc-port-pair-groups/port-pair-group-create-req.json0000664000175000017500000000043213656750333031621 0ustar zuulzuul00000000000000{ "port_pair_group": { "name": "Firewall_PortPairGroup", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Group of Firewall SF instances", "port_pairs": [ "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" ] } } networking-sfc-10.0.0/doc/api_samples/sfc-port-pair-groups/port-pair-group-list-resp.json0000664000175000017500000000060213656750333031512 0ustar zuulzuul00000000000000{ "port_pair_groups": [ { "name": "Firewall_PortPairGroup", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Group of Firewall SF instances", "port_pairs": [ "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" ], "id": "4512d643-24fc-4fae-af4b-321c5e2eb3d1" } ] } networking-sfc-10.0.0/doc/api_samples/sfc-service-graphs/0000775000175000017500000000000013656750461023324 5ustar zuulzuul00000000000000networking-sfc-10.0.0/doc/api_samples/sfc-service-graphs/service-graph-update-resp.json0000664000175000017500000000157013656750333031206 0ustar zuulzuul00000000000000{ "service_graph":{ "id":"15e82a5c-c907-4c8c-9501-3e9268178bf8", "name":"new-name", "description":"and new description, the port_chains dictionary can't be updated", "tenant_id":"0b18a09b22ef49a5be0bf51d68ed1962", "project_id":"0b18a09b22ef49a5be0bf51d68ed1962", "port_chains":{ "19b1965e-f528-4a81-bb05-64b18815fcfc":[ "3a478b3b-6012-47c6-9c34-6adaffc51b32" ], "07d0bf74-4293-4e4a-b66f-561ba30bcf76":[ "3a478b3b-6012-47c6-9c34-6adaffc51b32" ], "74e01d2b-3dbc-4de8-9969-d4b95d710103":[ "41e04664-59fb-4f1c-a063-6cfa39ed9ae7", "71b514f6-498b-4ae8-ba4e-dff434ad6c1a" ], "41e04664-59fb-4f1c-a063-6cfa39ed9ae7":[ "07d0bf74-4293-4e4a-b66f-561ba30bcf76", "19b1965e-f528-4a81-bb05-64b18815fcfc" ] } } } networking-sfc-10.0.0/doc/api_samples/sfc-service-graphs/service-graph-create-req.json0000664000175000017500000000126613656750333031007 0ustar zuulzuul00000000000000{ 
"service_graph":{ "name":"graph", "description":"one graph that uses 6 port chains", "port_chains":{ "19b1965e-f528-4a81-bb05-64b18815fcfc":[ "3a478b3b-6012-47c6-9c34-6adaffc51b32" ], "07d0bf74-4293-4e4a-b66f-561ba30bcf76":[ "3a478b3b-6012-47c6-9c34-6adaffc51b32" ], "41e04664-59fb-4f1c-a063-6cfa39ed9ae7":[ "07d0bf74-4293-4e4a-b66f-561ba30bcf76", "19b1965e-f528-4a81-bb05-64b18815fcfc" ], "74e01d2b-3dbc-4de8-9969-d4b95d710103":[ "41e04664-59fb-4f1c-a063-6cfa39ed9ae7", "71b514f6-498b-4ae8-ba4e-dff434ad6c1a" ] } } } networking-sfc-10.0.0/doc/api_samples/sfc-service-graphs/service-graph-update-req.json0000664000175000017500000000021613656750333031020 0ustar zuulzuul00000000000000{ "service_graph":{ "name":"new-name", "description":"and new description, the port_chains dictionary can't be updated" } } networking-sfc-10.0.0/doc/api_samples/sfc-service-graphs/service-graph-get-resp.json0000664000175000017500000000152613656750333030504 0ustar zuulzuul00000000000000{ "service_graph":{ "id":"15e82a5c-c907-4c8c-9501-3e9268178bf8", "name":"graph", "description":"one graph that uses 6 port chains", "tenant_id":"0b18a09b22ef49a5be0bf51d68ed1962", "project_id":"0b18a09b22ef49a5be0bf51d68ed1962", "port_chains":{ "19b1965e-f528-4a81-bb05-64b18815fcfc":[ "3a478b3b-6012-47c6-9c34-6adaffc51b32" ], "07d0bf74-4293-4e4a-b66f-561ba30bcf76":[ "3a478b3b-6012-47c6-9c34-6adaffc51b32" ], "74e01d2b-3dbc-4de8-9969-d4b95d710103":[ "41e04664-59fb-4f1c-a063-6cfa39ed9ae7", "71b514f6-498b-4ae8-ba4e-dff434ad6c1a" ], "41e04664-59fb-4f1c-a063-6cfa39ed9ae7":[ "07d0bf74-4293-4e4a-b66f-561ba30bcf76", "19b1965e-f528-4a81-bb05-64b18815fcfc" ] } } } networking-sfc-10.0.0/doc/api_samples/sfc-service-graphs/service-graph-create-resp.json0000664000175000017500000000152613656750333031170 0ustar zuulzuul00000000000000{ "service_graph":{ "id":"15e82a5c-c907-4c8c-9501-3e9268178bf8", "name":"graph", "description":"one graph that uses 6 port chains", "tenant_id":"0b18a09b22ef49a5be0bf51d68ed1962", 
"project_id":"0b18a09b22ef49a5be0bf51d68ed1962", "port_chains":{ "19b1965e-f528-4a81-bb05-64b18815fcfc":[ "3a478b3b-6012-47c6-9c34-6adaffc51b32" ], "07d0bf74-4293-4e4a-b66f-561ba30bcf76":[ "3a478b3b-6012-47c6-9c34-6adaffc51b32" ], "74e01d2b-3dbc-4de8-9969-d4b95d710103":[ "41e04664-59fb-4f1c-a063-6cfa39ed9ae7", "71b514f6-498b-4ae8-ba4e-dff434ad6c1a" ], "41e04664-59fb-4f1c-a063-6cfa39ed9ae7":[ "07d0bf74-4293-4e4a-b66f-561ba30bcf76", "19b1965e-f528-4a81-bb05-64b18815fcfc" ] } } } networking-sfc-10.0.0/doc/api_samples/sfc-service-graphs/service-graph-list-resp.json0000664000175000017500000000247113656750333030700 0ustar zuulzuul00000000000000{ "service_graphs":[ { "id":"15e82a5c-c907-4c8c-9501-3e9268178bf8", "name":"graph", "description":"one graph that uses 6 port chains", "tenant_id":"0b18a09b22ef49a5be0bf51d68ed1962", "project_id":"0b18a09b22ef49a5be0bf51d68ed1962", "port_chains":{ "19b1965e-f528-4a81-bb05-64b18815fcfc":[ "3a478b3b-6012-47c6-9c34-6adaffc51b32" ], "07d0bf74-4293-4e4a-b66f-561ba30bcf76":[ "3a478b3b-6012-47c6-9c34-6adaffc51b32" ], "74e01d2b-3dbc-4de8-9969-d4b95d710103":[ "41e04664-59fb-4f1c-a063-6cfa39ed9ae7", "71b514f6-498b-4ae8-ba4e-dff434ad6c1a" ], "41e04664-59fb-4f1c-a063-6cfa39ed9ae7":[ "07d0bf74-4293-4e4a-b66f-561ba30bcf76", "19b1965e-f528-4a81-bb05-64b18815fcfc" ] } }, { "id":"b91000f0-ece2-4254-9679-76e1ba6843cb", "name":"simpler-graph", "description":"", "tenant_id":"0b18a09b22ef49a5be0bf51d68ed1962", "project_id":"0b18a09b22ef49a5be0bf51d68ed1962", "port_chains":{ "ff90b009-396d-4ce7-b387-82a702125e46":[ "1da89f39-db34-401d-b54b-468e1c648cd1" ] } } ] } networking-sfc-10.0.0/zuul.d/0000775000175000017500000000000013656750461016010 5ustar zuulzuul00000000000000networking-sfc-10.0.0/zuul.d/projects.yaml0000664000175000017500000000051413656750333020523 0ustar zuulzuul00000000000000- project: check: jobs: - networking-sfc-functional - networking-sfc-tempest - networking-sfc-tempest-multinode: voting: false gate: jobs: - networking-sfc-functional - 
networking-sfc-tempest periodic: jobs: - networking-sfc-tempest-periodic networking-sfc-10.0.0/zuul.d/jobs.yaml0000664000175000017500000000673713656750333017644 0ustar zuulzuul00000000000000- job: name: networking-sfc-functional parent: neutron-functional vars: project_name: networking-sfc - job: name: networking-sfc-tempest parent: neutron-tempest-plugin-sfc files: ^.*$ irrelevant-files: - ^(test-|)requirements.txt$ - ^setup.cfg$ - job: name: networking-sfc-tempest-multinode parent: networking-sfc-tempest nodeset: openstack-two-node-bionic pre-run: playbooks/multinode-scenario-pre-run.yaml roles: - zuul: openstack/devstack vars: devstack_localrc: PHYSICAL_NETWORK: default tempest_plugins: - neutron-tempest-plugin devstack_services: tls-proxy: false tempest: true neutron-dns: true neutron-qos: true neutron-segments: true neutron-trunk: true neutron-log: true cinder: true devstack_local_conf: post-config: $NEUTRON_CONF: quotas: quota_router: 100 quota_floatingip: 500 quota_security_group: 100 quota_security_group_rule: 1000 # NOTE(slaweq): We can get rid of this hardcoded absolute path when # devstack-tempest job will be switched to use lib/neutron instead of # lib/neutron-legacy "/$NEUTRON_CORE_PLUGIN_CONF": ml2: type_drivers: flat,geneve,vlan,gre,local,vxlan mechanism_drivers: openvswitch,l2population ml2_type_vlan: network_vlan_ranges: foo:1:10 ml2_type_vxlan: vni_ranges: 1:2000 ml2_type_gre: tunnel_id_ranges: 1:1000 agent: l2_population: True tunnel_types: vxlan,gre ovs: tunnel_bridge: br-tun bridge_mappings: public:br-ex $NEUTRON_L3_CONF: agent: availability_zone: nova $NEUTRON_DHCP_CONF: agent: availability_zone: nova "/etc/neutron/api-paste.ini": "composite:neutronapi_v2_0": use: "call:neutron.auth:pipeline_factory" noauth: "cors request_id catch_errors osprofiler extensions neutronapiapp_v2_0" keystone: "cors request_id catch_errors osprofiler authtoken keystonecontext extensions neutronapiapp_v2_0" test-config: $TEMPEST_CONFIG: neutron_plugin_options: 
provider_vlans: foo, agent_availability_zone: nova available_type_drivers: flat,geneve,vlan,gre,local,vxlan group-vars: subnode: devstack_services: tls-proxy: false q-agt: true q-l3: true q-meta: true neutron-qos: true neutron-trunk: true neutron-log: true devstack_local_conf: post-config: # NOTE(slaweq): We can get rid of this hardcoded absolute path when # devstack-tempest job will be switched to use lib/neutron instead of # lib/neutron-legacy "/$NEUTRON_CORE_PLUGIN_CONF": agent: l2_population: True tunnel_types: vxlan,gre ovs: tunnel_bridge: br-tun bridge_mappings: public:br-ex $NEUTRON_L3_CONF: agent: availability_zone: nova - job: name: networking-sfc-tempest-periodic parent: networking-sfc-tempest branches: master networking-sfc-10.0.0/zuul.d/project.yaml0000664000175000017500000000033213656750333020336 0ustar zuulzuul00000000000000- project: templates: - openstack-lower-constraints-jobs-neutron - openstack-python3-ussuri-jobs-neutron - publish-openstack-docs-pti - check-requirements - release-notes-jobs-python3 networking-sfc-10.0.0/requirements.txt0000664000175000017500000000200213656750333020043 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
pbr!=2.1.0,>=2.0.0 # Apache-2.0 eventlet!=0.18.3,!=0.20.1,>=0.18.2 # MIT netaddr>=0.7.18 # BSD python-neutronclient>=6.7.0 # Apache-2.0 SQLAlchemy>=1.2.0 # MIT alembic>=0.8.10 # MIT six>=1.10.0 # MIT stevedore>=1.20.0 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.messaging>=5.29.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 neutron-lib>=1.18.0 # Apache-2.0 neutron>=13.0.0.0b2 # Apache-2.0 # The comment below indicates this project repo is current with neutron-lib # and should receive neutron-lib consumption patches as they are released # in neutron-lib. It also implies the project will stay current with TC # and infra initiatives ensuring consumption patches can land. # neutron-lib-current networking-sfc-10.0.0/setup.cfg0000664000175000017500000000504613656750461016415 0ustar zuulzuul00000000000000[metadata] name = networking-sfc summary = APIs and implementations to support Service Function Chaining in Neutron. 
description-file = README.rst author = OpenStack author-email = openstack-discuss@lists.openstack.org home-page = https://docs.openstack.org/networking-sfc/latest/ python-requires = >=3.6 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 [files] packages = networking_sfc [entry_points] neutronclient.extension = flow_classifier = networking_sfc.cli.flow_classifier port_chain = networking_sfc.cli.port_chain port_pair = networking_sfc.cli.port_pair port_pair_group = networking_sfc.cli.port_pair_group neutron.db.alembic_migrations = networking-sfc = networking_sfc.db.migration:alembic_migrations neutron.service_plugins = flow_classifier = networking_sfc.services.flowclassifier.plugin:FlowClassifierPlugin sfc = networking_sfc.services.sfc.plugin:SfcPlugin networking_sfc.sfc.drivers = dummy = networking_sfc.services.sfc.drivers.dummy.dummy:DummyDriver ovs = networking_sfc.services.sfc.drivers.ovs.driver:OVSSfcDriver networking_sfc.flowclassifier.drivers = dummy = networking_sfc.services.flowclassifier.drivers.dummy.dummy:DummyDriver ovs = networking_sfc.services.flowclassifier.drivers.ovs.driver:OVSFlowClassifierDriver neutron.agent.l2.extensions = sfc = networking_sfc.services.sfc.agent.extensions.sfc:SfcAgentExtension networking_sfc.sfc.agent_drivers = ovs = networking_sfc.services.sfc.agent.extensions.openvswitch.sfc_driver:SfcOVSAgentDriver oslo.config.opts = networking-sfc = networking_sfc.opts:list_sfc_opts networking-sfc.quotas = networking_sfc.opts:list_quota_opts oslo.policy.policies = networking-sfc = networking_sfc.policies:list_rules neutron.policies = 
networking-sfc = networking_sfc.policies:list_rules [compile_catalog] directory = networking_sfc/locale domain = networking-sfc [update_catalog] domain = networking-sfc output_dir = networking_sfc/locale input_file = networking_sfc/locale/networking-sfc.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = networking_sfc/locale/networking-sfc.pot [egg_info] tag_build = tag_date = 0 networking-sfc-10.0.0/tox.ini0000664000175000017500000001050713656750333016103 0ustar zuulzuul00000000000000[tox] envlist = py37,pep8 minversion = 3.1.1 skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:true} OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:true} OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:true} PYTHONWARNINGS=default::DeprecationWarning passenv = TRACE_FAILONLY usedevelop = True install_command = pip install {opts} {packages} deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt whitelist_externals = sh find commands = find . -type f -name "*.py[c|o]" -delete find -path "*/__pycache__*" -delete stestr run {posargs} # there is also secret magic in ostestr which lets you run in a fail only # mode. To do this define the TRACE_FAILONLY environmental variable. 
[testenv:functional] setenv = {[testenv]setenv} OS_TEST_PATH=./networking_sfc/tests/functional [testenv:dsvm-functional] setenv = {[testenv]setenv} OS_TEST_PATH=./networking_sfc/tests/functional OS_SUDO_TESTING=1 OS_FAIL_ON_MISSING_DEPS=1 OS_TEST_TIMEOUT=180 OS_TESTR_CONCURRENCY=1 OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs} whitelist_externals = sh cp sudo commands = stestr run {posargs} [testenv:pep8] commands = flake8 pylint --rcfile=.pylintrc --output-format=colorized {posargs:networking_sfc} {toxinidir}/tools/check_unit_test_structure.sh neutron-db-manage --subproject networking-sfc --database-connection sqlite:// check_migration {[testenv:genconfig]commands} {[testenv:genpolicy]commands} whitelist_externals = sh [testenv:cover] setenv = PYTHON=coverage run --source networking_sfc --parallel-mode commands = stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml [testenv:venv] commands = {posargs} [testenv:docs] deps = -r{toxinidir}/doc/requirements.txt commands = sphinx-build -W -b html -d doc/build/doctrees doc/source doc/build/html [testenv:pdf-docs] deps = {[testenv:docs]deps} envdir = {toxworkdir}/docs whitelist_externals = make commands = sphinx-build -W -b latex doc/source doc/build/pdf make -C doc/build/pdf [flake8] # TODO(dougwig) -- uncomment this to test for remaining linkages # N530 direct neutron imports not allowed ignore = N530 # From neutron-lib flake8 # H904: Delay string interpolations at logging calls enable-extensions=H904 show-source = true builtins = _ exclude = ./.*,build,dist import-order-style = pep8 [hacking] import_exceptions = networking_sfc._i18n local-check-factory = neutron_lib.hacking.checks.factory [testenv:api-ref] whitelist_externals = rm deps = {[testenv:docs]deps} envdir = {toxworkdir}/docs commands = rm -rf api-ref/build sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html [testenv:debug] commands = oslo_debug_helper -t networking_sfc/tests {posargs} 
[testenv:releasenotes] deps = {[testenv:docs]deps} envdir = {toxworkdir}/docs commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:genconfig] commands = oslo-config-generator --config-file etc/oslo-config-generator/networking-sfc.conf [testenv:genpolicy] commands = oslopolicy-sample-generator --config-file=etc/oslo-policy-generator/policy.conf [testenv:lower-constraints] deps = -c{toxinidir}/lower-constraints.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt [testenv:dev] # run locally (not in the gate) using editable mode # https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs commands = pip install -q -e "git+https://opendev.org/openstack/neutron#egg=neutron" [testenv:py-dev] commands = {[testenv:dev]commands} {[testenv]commands} [testenv:pep8-dev] deps = {[testenv]deps} commands = {[testenv:dev]commands} {[testenv:pep8]commands} # This environment can be used to quickly validate that all needed system # packages required to successfully execute test targets are installed [testenv:bindep] # Do not install any requirements. We want this to be fast and work even if # system dependencies are missing, since it's used to tell you what system # dependencies are missing! This also means that bindep must be installed # separately, outside of the requirements files. 
deps = bindep commands = bindep test networking-sfc-10.0.0/ChangeLog0000664000175000017500000004657713656750461016364 0ustar zuulzuul00000000000000CHANGES ======= 10.0.0 ------ * Cleanup py27 support * Remove the dependency on the "mock" package 10.0.0.0b1 ---------- * Drop python 2 support and testing * Switch to Ussuri jobs * Fix misspell word * PDF documentation build * [Functional tests] Fix SIGHUP handling tests * Update master for stable/train 9.0.0 ----- * Add Python 3 Train unit tests * Update the constraints url * Add libpq-dev to bindep.txt to fix gate error * Use opendev repository * Switch functional jobs to python 3 * Switch functional job to Zuulv3 syntax * Complete move of networking-sfc tempest tests to tempest plugin * Update sphinx requirements * Switch to native openflow implementation * OpenDev Migration Patch * Dropping the py35 testing * Combine two patches to fix gates * Replace openstack.org git:// URLs with https:// * Update master for stable/stein 8.0.0 ----- * add python 3.7 unit test job 8.0.0.0b1 --------- * doc: Add policy reference * Fix gate issues on master * Update OXM\_OF\_PKT\_REG to OXM\_OF\_PKT\_REG0 * use common rpc and exceptions from neutron-lib * Extend timeout of tempest job to 3h * tox: make pep8-dev use python3 like pep8 * fix tox python3 overrides * Upgrade pylint to a version that works with python3 * Update Openflow version from 1.1 to 1.3 * Increment versioning with pbr instruction * Define default policies in code * use openstack-lower-constraints-jobs-neutron job template * Change openstack-dev to openstack-discuss * Don't quote {posargs} in tox.ini * add local tox targets for pep8, py3 and py27 * Correct "openstack sfc port create" and delete superfluous "\" * use neutron-lib for common\_db\_mixin methods * use db api from neutron-lib * Remove those copy words occured twice times in api.rst * Update min tox version to 2.0 * add python 3.6 unit test job * switch documentation job to new PTI * opt in for neutron-lib 
consumption patches * import zuul job settings from project-config * use setup\_extension in unit tests * fix tox python3 overrides * Update reno for stable/rocky 7.0.0 ----- * remove dead IDAllocator code * Add release notes link in README * Fix alembic migration template generator script * Use get\_marker\_obj from neutron-lib * Use i18n from the project * Fix requirements * Switch to stestr * update requirements for neutron-lib 1.18.0 * Function argument name not used correctly 7.0.0.0b3 --------- * uncap eventlet 7.0.0.0b1 --------- * use rpc Connection rather than create\_connection * Fix constraints and uncap eventlets * Fix new PEP8 errors * Delete port chain failed * remove unused plugin.get\_plugin\_name() * Cleanup test-requirements * Revert "DNM: tentative hack to point to neutron-lib master" * Updated from global requirements * DNM: tentative hack to point to neutron-lib master * Update OPNFV link on README * add lower-constraints job * Avoid tox-install.sh * Update links in README * use common agent topics from neutron-lib * stop mocking vlantransparent LOG in UTs * Update reno for stable/queens * Remove duplicated 'the' word in documentation * Zuul: Remove project name * devstack: support lib/neutron * Remove the deprecated "giturl" option 6.0.0 ----- * Updated from global requirements * test requirements: move from tempest-lib to tempest 6.0.0.0b2 --------- * Updated from global requirements * Remove setting of version/release from releasenotes * Updated from global requirements * Updated from global requirements * Update l2pop calls to use context instead of session * Update OpenStack Client syntax for graphs (doc) * Updated from global requirements * Zuul: add file extension to playbooks path * Remove Zuul v3 standard setups * Support the NSH SFC Encapsulation protocol w/ OVS * Zuul v3 migration * Driver changes for Tap SF support in portchain * API and CLI changes for Passive Tap SF * Switch to tempest.common.utils 6.0.0.0b1 --------- * Update SFC 
Encapsulation documentation to NSH * Specify sections in configuration samples * Replace all uses of dl\_type with eth\_type * Add Service Graph OVS Driver+Agent logic+tests * Use common is\_a\_flow\_line() method from ovs\_lib * Add Service Graph DB and Plugin logic+tests * Updated from global requirements * SFC Proxy Port Correlation for Non-Transparent Service Functions * Add Service Graph API/extension resource and stubs * Shrink Tempest scenario manager copy * Use openstack CLI in documentation * Update imports for neutron/ml2 config * Updated from global requirements * Fix unit tests and test configuration * SfcOVSBridgeExt: rely on ovs\_lib to use the right OF version * Updated from global requirements * Updated from global requirements * Fix multinode tempest tests * Clean tempest gate configuration * Update reno for stable/pike * Add Service Graph documentation * tempest: update on removal of cred manager aliases * Added Workflow & OVS flow details for TAP SF 5.0.0.0rc1 ---------- * Updated from global requirements * Fix cli unit tests * Remove OSC parts * Add auto-generated config reference * Automatically generate configuration files 5.0.0.0b3 --------- * Updated from global requirements * Updated from global requirements * Update documentation links * Update doc URLs, add main doc link in readme * Rearrange existing documentation to fit the new standard layout * Switch to openstackdocstheme * Turn on warning-is-error for sphinx build * Use flake8-import-order plugin * Updated from global requirements * Replace the usage of 'manager' with 'os\_primary' * Updated from global requirements * Updated from global requirements * Fix code for upcoming pylint 1.7.1 * Updated from global requirements * Fix html\_last\_updated\_fmt for Python3 5.0.0.0b2 --------- * Updated from global requirements * Updated from global requirements * Trivial fix typos * OVS Driver tests: scope Flow Rules to Port Chains * use attribute functions/operations from neutron-lib * 
Updated from global requirements * Require Port Pairs to share same correlation type * Updated from global requirements * Updated from global requirements * test fix: follow neutron change Id22faa1f6179c2fdf8a136972d65f10749c9fc2e * Re-enable translation tox tests * Updated from global requirements * remove delete\_flow variant, now supported by neutron ovs\_lib * Correct dl\_type for non-SFC-proxied hops * Create Port\_Mapping Support with OVS Driver and Agent * Fix group id for symmetric flows reverse path * devstack: use l2\_agent lib to configure sfc ext * Remove OVS compilation on functional and fullstack jobs * Use new enginefacade * SFC Proxy processing for non-transparent Service Functions * Disable new N537 hacking check from next neutron-lib * Migrate neutron.plugins.common to neutron-lib * Fix UT failures with multiple flow classifiers * Updated from global requirements 5.0.0.0b1 --------- * Replace space with underscore in doc file * Add missing specifications in main toctree * Detail installation steps in main documentation * Updated from global requirements * Updated from global requirements * Remove log translations * Follow OVSCookieBridge refactoring in neutron * Replace insertion-mode with tap-enabled * Update internal links in documentation * Enable placement-api in devstack job config * Fix formatting of System Design and Workflow * fix tempest multinode test failure - enable nova vnc in localrc per bug 1381568's suggestion - use admin\_manager to create client for multinode tests * Use data\_utils from tempest.lib * Updated from global requirements * Add all extensions to the tempest run * Be explicit about the extensions being tested * Port Chain Tap for passive Service Function * Fix N536 hacking check from neutron-lib * Delete stale port-chain db entry * Updated from global requirements * Use neutron-lib's context module * Update reno for stable/ocata * consume ServicePluginBase from neutron-lib 4.0.0 ----- * Fix release note formatting * 
Add reno release notes * tempest: Switch to local copy of tempset/scenario/manager.py * Support MPLS correlation without SFC Proxy * Sync the flake8 extensions with neutron-lib * Default chain parameters per parameter * Drop localrc references * Symmetric Chain Support Tempest Scenario Test * Symmetric Chain Support Unit\_test drivers test * Include SRC/DST IP in the flowclassifier from logical ports * Symmetric Chain Support Unit\_test * Symmetric Chain Support Tempest Api Test * Use simple dict update and dict literal to optimize code * Symmetric Chain Support for OVS driver and agent * Optimize and clean-up sfc\_driver * Pass update\_flowrule\_status in driver.py * General clean up and optimize the code after PyCharm inspect code * Remove useless file subunit-trace.py * Optimize unit test test\_driver * Fix possible reference before assignment errors * Optimized imports to comply OpenStack style guideline * Remove unused keyword parameters, and fix pylint wrong-import-order * Add detail error message for SSH timeout in scenario tests * Make the pep8 env detect style errors again * Updated from global requirements * Remove useless returns, modify code to use kwargs * Optimize \_resolve\_resource\_path logic * Clean requirements * Remove support for py34 * Fix Annotator Errors * Use https instead of http for git.openstack.org * Update hacking version * Prepare for using standard python tests * Import type\_vxlan in the base tests * Remove unused oslo-config-generator parts * Use stevedore testing interface * Switch to decorators.idempotent\_id * Replace six.iteritems with dict.items() * Updated from global requirements * Functional tests: use neutron rootwrap filters * Use ostestr everywhere * Remove unused empty file * Fix extension loading functional test * Remove unused module index in documentation * Use neutron-lib portbindings api-def * Remove support for py33 * Updated from global requirements * Use project\_id instead of tenant\_id in DB models/OVS driver 
* Reflect neutron-lib migration of agent extensions from neutron * Fix intermittent tempest test failures * Fix flow deletion unit tests * Fix functional and tempest tests * Updated from global requirements * Fix Json Port Chain create response example * Fix typos in functions name * Updated from global requirements * Enable PEP8 tests everywhere * Move db migration tests to functional tests * Use ovs devstack plugin functions from neutron * Replaces uuid.uuid4 with uuidutils.generate\_uuid() * Fix neutronclient.i18n.\_() DeprecationWarning * Updated from global requirements * Updated from global requirements * Update OVS handling and test hooks * Fix permissions of tools/ostestr\_compat\_shim.sh * Fix devstack all-in-one installation * added cli parameters validation * Filter out tenant\_id in OSC commands output * Use DB field sizes instead of \_MAX\_LEN constants * Use ExtensionDescriptor from neutron-lib * Remove PLURALS * Show team and repo badges on README * Add reno for release notes management * Add script for neutron-lib source periodic job * Updated from global requirements * Remove unused LOG to keep code clean * Fix file permissions * Replace 'assertFalse(a in b)' with 'assertNotIn(a, b)' * Switch to using plugins directory in lieu of neutron manager * Allow SFC installation with q-agt service only * Make osc entry points distinctive 3.0.0 ----- * Updated from global requirements * add multinode support for tempest * Remove last vestiges of oslo-incubator * Add sfc commands to openstackclient * Remove deprecation warnings for model\_base * Fix deprecation warning in tests * Fix file permissions * Updated from global requirements * refactor parameter validation * Updated from global requirements * Using --strict and add priority for del\_flows * Fix random DB unit test failure * Added support of symmetric chain parameter in API * Bring models in sync with migrations, add test * Replace OVSSfcAgent by a L2 agent extension * Networking-sfc / OVN Driver 
Details of the OVN northbound DB schema have been removed * Use constraints environment for testing * Fix bug where a chain\_id could not be specified * Remove retrying from requirements * Fix typo in db.py * Force tempest tests to run sequentially * Updated from global requirements * Include alembic migrations in module * Enable plugins to determine if they want to rebuild OVS * Change query for port-pair-group and port-chain id. Fix bug 1625186 * driver interface: introduce precommit, postcommit * fix tempest tests failure * Use neutron-lib model\_base * OVS Driver and Agent for Symmetric Port Chains * Revert "Revert "Rename DB columns: tenant -> project"" Modified unit tests * [api-ref] Remove unused parameter * [api-ref] Remove temporary block in conf.py * make functional test not skip the testcases * Fix order of arguments in assertEqual 2.0.0 ----- * Remove tempest from test\_requirements.txt, which could cause package incompatible. This happened sometimes when running devstack at stable/mitaka, change #351486 added tempest testcases didn't test at stable/mitaka, and stack.sh would fail due to tempest requiring some python packages newer than installed * Config logABug feature for networking-sfc api-ref * Unifying parameters type * Remove logging import unused * Remove unused CONF import * set [flowclassifier] section drivers to be ovs in devstack setup * Revert "Rename DB columns: tenant -> project" * Add OVS driver cross-subnet check * Revert "update ovs agent code to catchup neutron's changes." 
* Add chain\_id, port\_pair\_group\_parameters and weight to API documentation * Add chain\_id support in port chain and group\_id support in port pair group * Also add port pair group parameters when creating port pair group * update flow classifer api to be more smart to parse parameters * Get ready for os-api-ref sphinx theme change * Fix the mistakes in the comments * Add weight in service\_function\_params when creating port pair * update ovs agent code to catchup neutron's changes * ignore build directory in pep8 tests * update db migration auto generation code * move all db migration files from newton branch to mitaka branch * Fix the typo in the file * Rename DB columns: tenant -> project * Enable DeprecationWarning in test environments * Updated from global requirements * Add Python 3.5 classifier and venv * add tempest testcases * add functional tests * Update the home-page info with the developer documentation * Fix bug that restarting ovs agent will eliminate existing port chains' flow rules * Remove execute permission of common py files in networking-sfc * API reference fixes for tox run * Correct reraising of exception * Fix order of arguments in assertEqual * Updated from global requirements * Fix raise when egress does not belong to a host * remove unnecessary code and simplify the logic in ovs * Use protocol definitions from neutron-lib * Fix test deprecation warnings * Adds API Reference documentation * There are some word problems to be corrected * Delete unused LOG to keep code clean * removed sfc\_encap\_mode config option * The typo needs to be fixed:releated/mechanim * Updated from global requirements * Fix order of arguments in assertEqual * Fix the document missing * Fix tox unit test issue * Set logical\_source\_port optional in db * Cleanup dead Code and Simplify the logic in ovs driver and ovs agent. - Clean the dead Code which is not used anymore. 
- Simplify some code logic, remove unnecessary complex logic to make the code easy to understand * Updated from global requirements * Expose classifier driver pre-commit error to the user * Case sensitive errors in Networking-sfc documentation * FlowClassifier driver - add missing space in error log * Fix the wrong sequence number display * Remove check\_i18n checks * Fix delete\_flow\_classifier error message * Tox: Remove exclude directories that no longer exist * Use converters from neutron-lib * Updated from global requirements * Set default flow classifier driver * Change port-pair-group CRUD for update in port-chain * driver\_manager: remove unused docstring * make ovs driver to support update port chain with modifying port-pair-groups * Add unittests for ovs agent * Add unittests for ovs driver * fix bug that new nova instance cannot get ip address in multinodes - In multinodes, the flood flow rule is not generated correctly. - When node instance starts, it sends dhcp broadcast request to - dhcp agent. The broadcast packet drops in br-tun because there - is only default rule to deal with flood packet in br-tun. - The reason is ovs-agent uses mod-flow to add flood flow but - in openvswitch 2.4. The behavior is a little different from - openvswitch 2.0. It will not add a new flow rule if there is no - matching in existing flow rules. The change to to change the - ovs action to install flood entry from mod-flow to add-flow. 
- The updated action behaves similar as other ovs actions like - installing unicast entry or installing arp responder entry * Updated from global requirements * Move some flow classifier fields restriction from api to ovs driver * Fixes port chain creation fails * Fix the bug that updating a port chain to add a flow classifier may not work * Fixes create\_connection() takes no arguments error * Switch to using hacking checks from neutron-lib * Fix pep8 import errors * \_get\_tenant\_id\_for\_create is now removed * Updated from global requirements * Fix coverage option and execution * Modified ext-list name field for user readablity * Removes unused requirements * Remove use of LOG.warn() * Updated from global requirements * Remove unnecessary \_\_init\_\_.py in top directory * Fix pep8 error E226 * update README.rst to be more precise * Support backward compatibility of L2population * correct README.rst format * update networking-sfc code to mitaka version * Add contributors section in README docucment * check flow classifiers conflict between port chains * add missing db migration files 1.0.0 ----- * Realizing SFC: OVS Agent and OVS * Implementing SFC OVS Driver * Updated from global requirements * update flow classifier api * move networking-sfc project to post-versioning * Trivial addition of a space in a help string * Updated workflow doc for ease of use * Implementing Common Driver Manager Framework * remove python 2.6 trove classifier * Implementing Port-Chain * Implementing Flow Classifier * Updated from global requirements * Implementing Command line support for networking-sfc * Improve examples for how to use networking-sfc Devstack plugin * Deprecated tox -downloadcache option removed * Updated from global requirements * add /etc/neutron/rootwrap.d to support devstack * Make sure correct branch of neutron is pulled in * Implementing devstack support in networking-sfc * Updated from global requirements * Updated from global requirements * Add a warning 
about the unreleased nature of the SFC API * Modified devref documents for more clarity * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Realizing SFC (2/5): Introducing alembic DB migration framework * Realizing SFC (1b/5): Adding additional tox and test settings * Refactored documents as per neutron standards * Updated from global requirements * Updated from global requirements * Amendments to API document * Fleshing out introductory documentation for networking-sfc * Realizing SFC (1/6) * Fix some spelling and format typo in api manual * Updated from global requirements * Update hyperlinks from Gerrit to rendered docs * Port Chain OVS Driver and Agent * API for Service Chaining * Service Chain System Design and Flow * Updated from global requirements * Add neutronclient to test dependencies * API for Service Chaining * Initial Cookiecutter dump * Added .gitreview networking-sfc-10.0.0/MANIFEST.in0000664000175000017500000000043513656750333016325 0ustar zuulzuul00000000000000include AUTHORS include ChangeLog include networking_sfc/db/migration/README include networking_sfc/db/migration/alembic_migrations/script.py.mako recursive-include networking_sfc/db/migration/alembic_migrations/versions * exclude .gitignore exclude .gitreview global-exclude *.pyc networking-sfc-10.0.0/etc/0000775000175000017500000000000013656750461015342 5ustar zuulzuul00000000000000networking-sfc-10.0.0/etc/README.txt0000664000175000017500000000101213656750333017030 0ustar zuulzuul00000000000000To generate the sample networking-sfc configuration files and the sample policy file, run the following commands respectively from the top level of the networking-sfc directory: tox -e genconfig tox -e genpolicy If a 'tox' environment is unavailable, then you can run the following commands respectively instead to generate the configuration 
files: oslo-config-generator --config-file etc/oslo-config-generator/networking-sfc.conf oslopolicy-sample-generator --config-file=etc/oslo-policy-generator/policy.conf networking-sfc-10.0.0/etc/oslo-config-generator/0000775000175000017500000000000013656750461021545 5ustar zuulzuul00000000000000networking-sfc-10.0.0/etc/oslo-config-generator/networking-sfc.conf0000664000175000017500000000020513656750333025347 0ustar zuulzuul00000000000000[DEFAULT] output_file = etc/networking-sfc.conf.sample wrap_width = 79 namespace = networking-sfc namespace = networking-sfc.quotas networking-sfc-10.0.0/etc/oslo-policy-generator/0000775000175000017500000000000013656750461021577 5ustar zuulzuul00000000000000networking-sfc-10.0.0/etc/oslo-policy-generator/policy.conf0000664000175000017500000000011213656750333023735 0ustar zuulzuul00000000000000[DEFAULT] output_file = etc/policy.yaml.sample namespace = networking-sfc networking-sfc-10.0.0/HACKING.rst0000664000175000017500000000036613656750333016370 0ustar zuulzuul00000000000000Networking SFC Style Commandments ================================= Please see the Neutron HACKING.rst file for style commandments for networking-sfc: `Neutron HACKING.rst `_ networking-sfc-10.0.0/.pylintrc0000664000175000017500000000606013656750333016434 0ustar zuulzuul00000000000000# The format of this file isn't really documented; just use --generate-rcfile [MASTER] # Add to the black list. It should be a base name, not a # path. You may set this option multiple times. ignore=.git,tests [MESSAGES CONTROL] # NOTE(gus): This is a long list. 
A number of these are important and # should be re-enabled once the offending code is fixed (or marked # with a local disable) disable= # "F" Fatal errors that prevent further processing import-error, # "I" Informational noise locally-disabled, # "E" Error for important programming issues (likely bugs) access-member-before-definition, no-member, no-method-argument, no-self-argument, # "W" Warnings for stylistic problems or minor programming issues abstract-method, arguments-differ, attribute-defined-outside-init, bad-builtin, bad-indentation, broad-except, dangerous-default-value, deprecated-lambda, expression-not-assigned, fixme, global-statement, no-init, non-parent-init-called, not-callable, protected-access, redefined-builtin, redefined-outer-name, signature-differs, star-args, super-init-not-called, super-on-old-class, unpacking-non-sequence, unused-argument, unused-import, unused-variable, # "C" Coding convention violations bad-continuation, invalid-name, missing-docstring, superfluous-parens, # "R" Refactor recommendations abstract-class-little-used, abstract-class-not-used, duplicate-code, inconsistent-return-statements, interface-not-implemented, no-else-return, no-self-use, too-few-public-methods, too-many-ancestors, too-many-arguments, too-many-branches, too-many-instance-attributes, too-many-lines, too-many-locals, too-many-nested-blocks, too-many-public-methods, too-many-return-statements, too-many-statements [BASIC] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowercased with underscores method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$ # Module names matching neutron-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$ # Don't require docstrings on tests. 
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [FORMAT] # Maximum number of characters on a single line. max-line-length=79 [VARIABLES] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. # _ is used by our localization additional-builtins=_ [CLASSES] # List of interface methods to ignore, separated by a comma. ignore-iface-methods= [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules= # should use oslo_serialization.jsonutils json [TYPECHECK] # List of module names for which member attributes should not be checked ignored-modules=six.moves,_MovedItems [REPORTS] # Tells whether to display a full report or only the messages reports=no networking-sfc-10.0.0/releasenotes/0000775000175000017500000000000013656750461017260 5ustar zuulzuul00000000000000networking-sfc-10.0.0/releasenotes/notes/0000775000175000017500000000000013656750461020410 5ustar zuulzuul00000000000000networking-sfc-10.0.0/releasenotes/notes/.placeholder0000664000175000017500000000000013656750333022657 0ustar zuulzuul00000000000000networking-sfc-10.0.0/releasenotes/notes/mpls-correlation-c36070eba63b9f87.yaml0000664000175000017500000000047313656750333027011 0ustar zuulzuul00000000000000--- features: - | Port Pairs now support ``correlation='mpls'`` as a ``service_function_parameters`` key and value pair, when using the OVS Driver. Having this correlation prevents the insertion of "SFC Proxy" flows that remove the MPLS labels before sending packets to the Service Functions. networking-sfc-10.0.0/releasenotes/notes/drop-py27-support-4670c8cdcfa3ba78.yaml0000664000175000017500000000033213656750333027124 0ustar zuulzuul00000000000000--- upgrade: - | Python 2.7 support has been dropped. Last release of networking-sfc to support python 2.7 is OpenStack Train. The minimum version of Python now supported by networking-sfc is Python 3.6. 
networking-sfc-10.0.0/releasenotes/notes/networking-sfc-0151b67501c641ef.yaml0000664000175000017500000000107513656750333026277 0ustar zuulzuul00000000000000--- features: - | Symmetric Port Chains can now be created, when using the OVS Driver, which will instantiate both a forward and a reverse path for the Port Chain. To enable, set ``symmetric=True`` as a ``chain_parameters`` key and value pair. Path-ID can now be configured. fixes: - | SFC path-ID parameter (Bug `1567654 `_) - | Path-ID generation in plugin (Bug `1588460 `_) - | Port Pair Group parameter (Bug `1588463 `_) networking-sfc-10.0.0/releasenotes/notes/unique-correlation-in-ppg-96d803a244425f66.yaml0000664000175000017500000000177513656750333030324 0ustar zuulzuul00000000000000--- upgrade: - | When upgrading from Ocata (v4), existing Port Chains that have been deployed using Port Pair Groups that combine Port Pairs of different correlation types (within the same Port Pair Group) will render differently when using the OVS Driver. Whereas before all the Port Pairs would have their correlation ignored if at least one of them had ``correlation=None``, now the OVS driver will render each differently. Recreation of each Port Pair Group (that have inconsistent Port Pairs' correlations) is recommended - and only Port Pairs sharing the same correlation type will now be supported. fixes: - | When creating Port Pair Groups, validation of each Port Pair's correlation type will now occur. Every Port Pair needs to share the same correlation type when they get attached to a specific Port Pair Group. This guarantees homogeneity in the composition of a Port Pair Group and clarifies the definition and feature set of each hop of a chain. networking-sfc-10.0.0/releasenotes/notes/service-graphs-4a1e54f6bbbfe805.yaml0000664000175000017500000000045013656750333026566 0ustar zuulzuul00000000000000--- features: - | Introduced new API resource, Service Graphs. 
Based on IETF SFC Encapsulation, this construct allows Port Chains to be linked together, forming dependencies between them (beyond simple flow classification). This is also known as Reclassification and Branching. networking-sfc-10.0.0/releasenotes/notes/sfc-tap-port-pair-db6b2f3d29520c9b.yaml0000664000175000017500000000061413656750333027035 0ustar zuulzuul00000000000000--- features: - | Service Function Chaining now supports insertion of Tap Service Functions. These SFs are deployed to passively monitor/analyze network traffic. One of the widely deployed use case is IDS. To flag a SF to be Tap, Port Pair Group should be created with ``--enable-tap`` flag. openstack sfc port pair group create tap-ppg --port-pair tap-pp --enable-tap networking-sfc-10.0.0/releasenotes/source/0000775000175000017500000000000013656750461020560 5ustar zuulzuul00000000000000networking-sfc-10.0.0/releasenotes/source/train.rst0000664000175000017500000000017613656750333022431 0ustar zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train networking-sfc-10.0.0/releasenotes/source/_static/0000775000175000017500000000000013656750461022206 5ustar zuulzuul00000000000000networking-sfc-10.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000013656750333024455 0ustar zuulzuul00000000000000networking-sfc-10.0.0/releasenotes/source/conf.py0000664000175000017500000002144213656750333022060 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. # networking-sfc documentation build configuration file, created by # sphinx-quickstart on Mon Oct 17 13:58:59 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # import sys # import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options repository_name = 'openstack/networking-sfc' bug_project = 'networking-sfc' bug_tag = '' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Networking SFC Release Notes' copyright = u'2016, Networking SFC Developers' # Release notes are version independent. # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'NetworkingSFCReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'NetworkingSFCReleaseNotes.tex', u'Networking SFC Release Notes Documentation', u'Networking SFC Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'NetworkingSFCReleaseNotes', u'Networking SFC Release Notes Documentation', [u'Networking SFC Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'NetworkingSFCReleaseNotes', u'Networking SFC Release Notes Documentation', u'Networking SFC Developers', 'NetworkingSFCReleaseNotes', 'Networking SFC project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] networking-sfc-10.0.0/releasenotes/source/stein.rst0000664000175000017500000000022113656750333022425 0ustar zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein networking-sfc-10.0.0/releasenotes/source/queens.rst0000664000175000017500000000022313656750333022605 0ustar zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens networking-sfc-10.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000015313656750333023436 0ustar zuulzuul00000000000000============================ Current Series Release Notes ============================ .. release-notes:: networking-sfc-10.0.0/releasenotes/source/rocky.rst0000664000175000017500000000022113656750333022432 0ustar zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. 
release-notes:: :branch: stable/rocky networking-sfc-10.0.0/releasenotes/source/index.rst0000664000175000017500000000030413656750333022414 0ustar zuulzuul00000000000000============================ Networking SFC Release Notes ============================ .. toctree:: :maxdepth: 1 unreleased train stein rocky queens pike ocata newton networking-sfc-10.0.0/releasenotes/source/ocata.rst0000664000175000017500000000023013656750333022372 0ustar zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata networking-sfc-10.0.0/releasenotes/source/newton.rst0000664000175000017500000000021113656750333022614 0ustar zuulzuul00000000000000=========================== Newton Series Release Notes =========================== .. release-notes:: :branch: origin/stable/newton networking-sfc-10.0.0/releasenotes/source/pike.rst0000664000175000017500000000021713656750333022240 0ustar zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike networking-sfc-10.0.0/api-ref/0000775000175000017500000000000013656750461016112 5ustar zuulzuul00000000000000networking-sfc-10.0.0/api-ref/source/0000775000175000017500000000000013656750461017412 5ustar zuulzuul00000000000000networking-sfc-10.0.0/api-ref/source/sfc-classifiers.inc0000664000175000017500000001367013656750333023172 0ustar zuulzuul00000000000000.. -*- rst -*- .. needs:method_verification .. needs:parameter_verification .. needs:example_verification .. needs:body_verification ===================================== Flow Classifiers (flow-classifiers) ===================================== Lists, shows information for, creates, updates and deletes flow classifiers. List Flow Classifiers ===================== .. rest_method:: GET /v1.0/sfc/flow_classifiers Lists flow classifiers. 
Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id Response -------- .. rest_parameters:: parameters.yaml - id: flow_classifier_id - tenant_id: tenant_id - name: classifier_name - description: description Response Example ---------------- **Example List flow classifiers: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-classifiers/flow-classifier-list-resp.json :language: javascript Create Flow Classifier ====================== .. rest_method:: POST /v1.0/sfc/flow_classifiers Creates a flow classifier. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id - name: classifier_name - description: description - ethertype: ethertype - protocol: protocol - source_port_range_min: source_port_range_min - source_port_range_max: source_port_range_max - destination_port_range_min: destination_port_range_min - destination_port_range_max: destination_port_range_max - source_ip_prefix: source_ip_prefix - destination_ip_prefix: destination_ip_prefix - source_logical_port: source_logical_port - destination_logical_port: destination_logical_port - l7_parameters: l7_parameters Request Example --------------- **Example Create flow classifier: JSON request** .. literalinclude:: ../../doc/api_samples/sfc-classifiers/flow-classifier-create-req.json :language: javascript Response -------- .. 
rest_parameters:: parameters.yaml - id: flow_classifier_id - tenant_id: tenant_id - name: classifier_name - description: description - ethertype: ethertype - protocol: protocol - source_port_range_min: source_port_range_min - source_port_range_max: source_port_range_max - destination_port_range_min: destination_port_range_min - destination_port_range_max: destination_port_range_max - source_ip_prefix: source_ip_prefix - destination_ip_prefix: destination_ip_prefix - source_logical_port: source_logical_port - destination_logical_port: destination_logical_port - l7_parameters: l7_parameters Response Example ---------------- **Example Create flow classifier: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-classifiers/flow-classifier-create-resp.json :language: javascript Show Flow Classifier Details ============================ .. rest_method:: GET /v1.0/sfc/flow_classifiers/{flow_classifier_id} Shows details for a flow classifier. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - id: flow_classifier_id - tenant_id: tenant_id Response -------- .. rest_parameters:: parameters.yaml - id: flow_classifier_id - tenant_id: tenant_id - name: classifier_name - description: description - ethertype: ethertype - protocol: protocol - source_port_range_min: source_port_range_min - source_port_range_max: source_port_range_max - destination_port_range_min: destination_port_range_min - destination_port_range_max: destination_port_range_max - source_ip_prefix: source_ip_prefix - destination_ip_prefix: destination_ip_prefix - source_logical_port: source_logical_port - destination_logical_port: destination_logical_port - l7_parameters: l7_parameters Response Example ---------------- **Example Show flow classifier: JSON response** .. 
literalinclude:: ../../doc/api_samples/sfc-classifiers/flow-classifier-get-resp.json :language: javascript Update Flow Classifier ====================== .. rest_method:: PUT /v1.0/sfc/flow_classifiers/{flow_classifier_id} Updates a flow classifier. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - id: flow_classifier_id - tenant_id: tenant_id - name: classifier_name - description: description Request Example --------------- **Example Update flow classifier: JSON request** .. literalinclude:: ../../doc/api_samples/sfc-classifiers/flow-classifier-update-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - id: flow_classifier_id - tenant_id: tenant_id - name: classifier_name - description: description - ethertype: ethertype - protocol: protocol - source_port_range_min: source_port_range_min - source_port_range_max: source_port_range_max - destination_port_range_min: destination_port_range_min - destination_port_range_max: destination_port_range_max - source_ip_prefix: source_ip_prefix - destination_ip_prefix: destination_ip_prefix - source_logical_port: source_logical_port - destination_logical_port: destination_logical_port - l7_parameters: l7_parameters Response Example ---------------- **Example Update flow classifier: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-classifiers/flow-classifier-update-resp.json :language: javascript Delete Flow Classifier ====================== .. rest_method:: DELETE /v1.0/sfc/flow-classifiers/{flow_classifier_id} Deletes a flow classifier. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - id: flow_classifier_id - tenant_id: tenant_id Response -------- There is no body content for the response of a successful Delete request. 
networking-sfc-10.0.0/api-ref/source/parameters.yaml0000664000175000017500000000726613656750333022452 0ustar zuulzuul00000000000000# variables in header x-openstack-request-id: description: > A unique ID for tracking the request. The request ID associated with the request appears in the log lines for that request. By default, the middleware configuration ensures that the request ID appears in the log files. in: header required: true type: string # variables in path port_chain_id: description: | The UUID of the port-chain. in: path required: true type: string # variables in query # variables in body chain_id: description: | The UUID of the port-chain. in: body required: true type: string chain_parameters: description: | A dictionary of chain parameters. in: body required: false type: object classifier_name: description: | The name of the flow-classifier. in: body required: true type: string description: description: | Text describing this parameter. in: body required: false type: string destination_ip_prefix: description: | The destination IP prefix. in: body required: false type: string destination_logical_port: description: | The UUID of the destination logical port. in: body required: false type: string destination_port_range_max: description: | Maximum destination protocol port. in: body required: false type: integer destination_port_range_min: description: | Minimum destination protocol port. in: body required: false type: integer egress_port_id: description: | The UUID of the egress Neutron port. in: body required: true type: string ethertype: description: | L2 ethertype. Can be IPv4 or IPv6 only. in: body required: false type: string flow_classifier_id: description: | The UUID of the flow-classifier. in: body required: true type: string flow_classifiers: description: | List of flow-classifier UUIDs. in: body required: false type: array ingress_port_id: description: | The UUID of the ingress Neutron port. 
in: body required: true type: string l7_parameters: description: | A dictionary of L7 parameters. in: body required: false type: object port_chain_name: description: | The name of the port-chain. in: body required: true type: string port_pair_group_id: description: | The UUID of the port-pair-group. in: body required: true type: string port_pair_group_name: description: | The name of the port-pair-group. in: body required: true type: string port_pair_groups: description: | List of port-pair-group UUIDs. in: body required: true type: array port_pair_id: description: | The UUID of the port-pair. in: body required: true type: string port_pair_name: description: | The name of the port-pair. in: body required: true type: string port_pairs: description: | List of port-pair UUIDs. in: body required: true type: array protocol: description: | IP protocol. in: body required: false type: string service_function_parameters: description: | A dictionary of service function parameters. in: body required: false type: object source_ip_prefix: description: | The source IP prefix. in: body required: false type: string source_logical_port: description: | The UUID of the source logical port. in: body required: false type: string source_port_range_max: description: | Maximum source protocol port. in: body required: false type: integer source_port_range_min: description: | Minimum source protocol port. in: body required: false type: integer tenant_id: description: | The UUID of the tenant in a multi-tenancy cloud. in: body required: true type: string networking-sfc-10.0.0/api-ref/source/conf.py0000664000175000017500000001573213656750333020717 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # networking-sfc documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys html_theme = 'openstackdocs' html_theme_options = { "sidebar_mode": "toc", } extensions = [ 'openstackdocstheme', 'os_api_ref', ] # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # # source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. 
project = u'Networking SFC API Reference' copyright = u'2010-present, OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # from neutron.version import version_info # The full version, including alpha/beta/rc tags. # release = version_info.release_string() release = '1.0.0' # The short X.Y version. # version = version_info.version_string() version = '1.0.0.' # openstackdocstheme options repository_name = 'openstack/networking-sfc' bug_project = 'networking-sfc' bug_tag = 'api-ref' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # The reST default role (used for this markup: `text`) to use # for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. 
# html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. 
# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'networkingsfcdoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'NetworkingSfc.tex', u'OpenStack Networking SFC API Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_use_modindex = True networking-sfc-10.0.0/api-ref/source/sfc-port-pairs.inc0000664000175000017500000001042413656750333022755 0ustar zuulzuul00000000000000.. -*- rst -*- .. needs:method_verification .. needs:parameter_verification .. needs:example_verification .. needs:body_verification ========================= Port Pairs (port-pairs) ========================= Lists, shows information for, creates, updates and deletes port pairs. List Port Pairs =============== .. rest_method:: GET /v1.0/sfc/port_pairs Lists port pairs. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id Response -------- .. 
rest_parameters:: parameters.yaml - id: port_pair_id - tenant_id: tenant_id - name: port_pair_name - description: description - port_pair_groups: port_pair_groups - flow_classifiers: flow_classifiers Response Example ---------------- **Example List port pairs: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-port-pairs/port-pair-list-resp.json :language: javascript Create Port Pair ================ .. rest_method:: POST /v1.0/sfc/port_pairs Creates a port pair. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id - name: port_pair_name - description: description - ingress: ingress_port_id - egress: egress_port_id - service_function_parameters: service_function_parameters Request Example --------------- **Example Create port pair: JSON request** .. literalinclude:: ../../doc/api_samples/sfc-port-pairs/port-pair-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - id: port_pair_id - tenant_id: tenant_id - name: port_pair_name - description: description - ingress: ingress_port_id - egress: egress_port_id - service_function_parameters: service_function_parameters Response Example ---------------- **Example Create port pair: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-port-pairs/port-pair-create-resp.json :language: javascript Show Port Pair Details ====================== .. rest_method:: GET /v1.0/sfc/port_pairs/{port_pair_id} Shows details for a port pair. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - id: port_pair_id - tenant_id: tenant_id Response -------- .. 
rest_parameters:: parameters.yaml - id: port_pair_id - tenant_id: tenant_id - name: port_pair_name - description: description - ingress: ingress_port_id - egress: egress_port_id - service_function_parameters: service_function_parameters Response Example ---------------- **Example Show port pair: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-port-pairs/port-pair-get-resp.json :language: javascript Update Port Pair ================ .. rest_method:: PUT /v1.0/sfc/port_pairs/{port_pair_id} Updates a port pair. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - id: port_pair_id - tenant_id: tenant_id - name: port_pair_name - description: description Request Example --------------- **Example Update port pair: JSON request** .. literalinclude:: ../../doc/api_samples/sfc-port-pairs/port-pair-update-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - id: port_pair_id - tenant_id: tenant_id - name: port_pair_name - description: description - ingress: ingress_port_id - egress: egress_port_id - service_function_parameters: service_function_parameters Response Example ---------------- **Example Update port pair: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-port-pairs/port-pair-update-resp.json :language: javascript Delete Port Pair ================ .. rest_method:: DELETE /v1.0/sfc/port-pairs/{port_pair_id} Deletes a port pair. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - id: port_pair_id - tenant_id: tenant_id Response -------- There is no body content for the response of a successful Delete request. networking-sfc-10.0.0/api-ref/source/sfc-port-pair-groups.inc0000664000175000017500000001047113656750333024111 0ustar zuulzuul00000000000000.. -*- rst -*- .. 
needs:method_verification .. needs:parameter_verification .. needs:example_verification .. needs:body_verification ===================================== Port Pair Groups (port-pair-groups) ===================================== Lists, shows information for, creates, updates and deletes port pair groups. List Port Pair Groups ===================== .. rest_method:: GET /v1.0/sfc/port_pair_groups Lists port pair groups. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id Response -------- .. rest_parameters:: parameters.yaml - id: port_pair_group_id - tenant_id: tenant_id - name: port_pair_group_name - description: description - port_pairs: port_pairs Response Example ---------------- **Example List port pair groups: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-port-pair-groups/port-pair-group-list-resp.json :language: javascript Create Port Pair Group ====================== .. rest_method:: POST /v1.0/sfc/port_pair_groups Creates a port pair group. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id - name: port_pair_group_name - description: description - port_pairs: port_pairs **Example Create port pair group: JSON request** .. literalinclude:: ../../doc/api_samples/sfc-port-pair-groups/port-pair-group-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - id: port_pair_group_id - tenant_id: tenant_id - name: port_pair_group_name - description: description - port_pairs: port_pairs Response Example ---------------- **Example Create port pair group: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-port-pair-groups/port-pair-group-create-resp.json :language: javascript Show Port Pair Group Details ============================ .. 
rest_method:: GET /v1.0/sfc/port_pair_groups/{port_pair_group_id} Shows details for a port pair group. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - id: port_pair_group_id - tenant_id: tenant_id Response -------- .. rest_parameters:: parameters.yaml - id: port_pair_group_id - tenant_id: tenant_id - name: port_pair_group_name - description: description - port_pairs: port_pairs Response Example ---------------- **Example Show port pair group: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-port-pair-groups/port-pair-group-get-resp.json :language: javascript Update Port Pair Group ====================== .. rest_method:: PUT /v1.0/sfc/port_pair_groups/{port_pair_group_id} Updates a port pair group. The current list of port pairs is replaced by the port pair list in the Update request. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - id: port_pair_group_id - tenant_id: tenant_id - name: port_pair_group_name - description: description - port_pairs: port_pairs Request Example --------------- **Example Update port pair group: JSON request** .. literalinclude:: ../../doc/api_samples/sfc-port-pair-groups/port-pair-group-update-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - id: port_pair_group_id - tenant_id: tenant_id - name: port_pair_group_name - description: description - port_pairs: port_pairs Response Example ---------------- **Example Update port pair group: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-port-pair-groups/port-pair-group-update-resp.json :language: javascript Delete Port Pair Group ====================== .. rest_method:: DELETE /v1.0/sfc/port-pair-groups/{port_pair_group_id} Deletes a port pair group. 
Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - id: port_pair_group_id - tenant_id: tenant_id Response -------- There is no body content for the response of a successful Delete request. networking-sfc-10.0.0/api-ref/source/sfc-chains.inc0000664000175000017500000001105013656750333022116 0ustar zuulzuul00000000000000.. -*- rst -*- .. needs:method_verification .. needs:parameter_verification .. needs:example_verification .. needs:body_verification =========================== Port Chains (port-chains) =========================== Lists, shows information for, creates, updates and deletes port chains. List Port Chains ================ .. rest_method:: GET /v1.0/sfc/port_chains Lists port chains. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id Response -------- .. rest_parameters:: parameters.yaml - id: chain_id - tenant_id: tenant_id - name: port_chain_name - description: description - port_pair_groups: port_pair_groups - flow_classifiers: flow_classifiers - chain_parameters: chain_parameters Response Example ---------------- **Example List port chains: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-chains/port-chain-list-resp.json :language: javascript Create Port Chain ================= .. rest_method:: POST /v1.0/sfc/port_chains Creates a port chain. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id - name: port_chain_name - description: description - port_pair_groups: port_pair_groups - flow_classifiers: flow_classifiers - chain_parameters: chain_parameters Request Example --------------- **Example Create port chain: JSON request** .. 
literalinclude:: ../../doc/api_samples/sfc-chains/port-chain-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - id: chain_id - tenant_id: tenant_id - name: port_chain_name - description: description - port_pair_groups: port_pair_groups - flow_classifiers: flow_classifiers - chain_parameters: chain_parameters Response Example ---------------- **Example Create port chain: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-chains/port-chain-create-resp.json :language: javascript Show Port Chain Details ======================= .. rest_method:: GET /v1.0/sfc/port_chains/{port_chain_id} Shows details for a port chain. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id - id: chain_id Response -------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id - id: chain_id - name: port_chain_name - description: description - port_pair_groups: port_pair_groups - flow_classifiers: flow_classifiers - chain_parameters: chain_parameters Response Example ---------------- **Example Show port chain: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-chains/port-chain-get-resp.json :language: javascript Update Port Chain ================= .. rest_method:: PUT /v1.0/sfc/port_chains/{port_chain_id} Updates a port chain. The current list of port pair groups is replaced by the port pair group list in the Update request. The current list of flow classifiers is replaced by the flow classifier list in the Update request. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. 
rest_parameters:: parameters.yaml - id: chain_id - tenant_id: tenant_id - name: port_chain_name - description: description - port_pair_groups: port_pair_groups - flow_classifiers: flow_classifiers Request Example --------------- **Example Update port chain: JSON request** .. literalinclude:: ../../doc/api_samples/sfc-chains/port-chain-update-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - id: chain_id - tenant_id: tenant_id - name: port_chain_name - description: description - port_pair_groups: port_pair_groups - flow_classifiers: flow_classifiers Response Example ---------------- **Example Update port chain: JSON response** .. literalinclude:: ../../doc/api_samples/sfc-chains/port-chain-update-resp.json :language: javascript Delete Port Chain ================= .. rest_method:: DELETE /v1.0/sfc/port-chains/{port_chain_id} Deletes a port chain. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - id: chain_id - tenant_id: tenant_id Response -------- There is no body content for the response of a successful Delete request. networking-sfc-10.0.0/api-ref/source/index.rst0000664000175000017500000000034113656750333021247 0ustar zuulzuul00000000000000:tocdepth: 2 ================== Networking SFC API ================== .. rest_expand_all:: .. include:: sfc-chains.inc .. include:: sfc-port-pair-groups.inc .. include:: sfc-port-pairs.inc .. include:: sfc-classifiers.inc networking-sfc-10.0.0/PKG-INFO0000664000175000017500000001354413656750461015673 0ustar zuulzuul00000000000000Metadata-Version: 1.2 Name: networking-sfc Version: 10.0.0 Summary: APIs and implementations to support Service Function Chaining in Neutron. 
Home-page: https://docs.openstack.org/networking-sfc/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ============================================================ Service Function Chaining Extension for OpenStack Networking ============================================================ Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/networking-sfc.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on Service Function Chaining API ----------------------------- This project provides APIs and implementations to support Service Function Chaining in Neutron. Service Function Chaining is a mechanism for overriding the basic destination based forwarding that is typical of IP networks. It is conceptually related to Policy Based Routing in physical networks but it is typically thought of as a Software Defined Networking technology. It is often used in conjunction with security functions although it may be used for a broader range of features. Fundamentally SFC is the ability to cause network packet flows to route through a network via a path other than the one that would be chosen by routing table lookups on the packet's destination IP address. It is most commonly used in conjunction with Network Function Virtualization when recreating in a virtual environment a series of network functions that would have traditionally been implemented as a collection of physical network devices connected in series by cables. A very simple example of a service chain would be one that forces all traffic from point A to point B to go through a firewall even though the firewall is not literally between point A and B from a routing table perspective. 
A more complex example is an ordered series of functions, each implemented in multiple VMs, such that traffic must flow through one VM at each hop in the chain but the network uses a hashing algorithm to distribute different flows across multiple VMs at each hop. This is an initial release, feedback is requested from users and the API may evolve based on that feedback. * Free software: Apache license * Source: https://opendev.org/openstack/networking-sfc * Documentation: https://docs.openstack.org/networking-sfc/latest * Overview: https://launchpad.net/networking-sfc * Bugs: https://bugs.launchpad.net/networking-sfc * Blueprints: https://blueprints.launchpad.net/networking-sfc * Wiki: https://wiki.openstack.org/wiki/Neutron/ServiceInsertionAndChaining * Release notes: https://docs.openstack.org/releasenotes/networking-sfc/ Features -------- * Creation of Service Function Chains consisting of an ordered sequence of Service Functions. SFs are virtual machines (or potentially physical devices) that perform a network function such as firewall, content cache, packet inspection, or any other function that requires processing of packets in a flow from point A to point B. 
* Reference implementation with Open vSwitch * Flow classification mechanism (ability to select and act on traffic) * Vendor neutral API * Modular plugin driver architecture Service Function Chaining Key Contributors ------------------------------------------ * Cathy Zhang (Project Lead): https://launchpad.net/~cathy-h-zhang * Louis Fourie: https://launchpad.net/~lfourie * Paul Carver: https://launchpad.net/~pcarver * Vikram: https://launchpad.net/~vikschw * Mohankumar: https://blueprints.launchpad.net/~mohankumar-n * Rao Fei: https://launchpad.net/~milo-frao * Xiaodong Wang: https://launchpad.net/~xiaodongwang991481 * Ramanjaneya Reddy Palleti: https://launchpad.net/~ramanjieee * Stephen Wong: https://launchpad.net/~s3wong * Igor Duarte Cardoso: https://launchpad.net/~igordcard * Prithiv: https://launchpad.net/~prithiv * Akihiro Motoki: https://launchpad.net/~amotoki * Swaminathan Vasudevan: https://launchpad.net/~swaminathan-vasudevan * Armando Migliaccio https://launchpad.net/~armando-migliaccio * Kyle Mestery https://launchpad.net/~mestery Background on the Subject of Service Function Chaining ------------------------------------------------------ * Original Neutron bug (request for enhancement): https://bugs.launchpad.net/neutron/+bug/1450617 * https://blueprints.launchpad.net/neutron/+spec/neutron-api-extension-for-service-chaining * https://blueprints.launchpad.net/neutron/+spec/common-service-chaining-driver-api * https://wiki.opnfv.org/display/VFG/Openstack+Based+VNF+Forwarding+Graph Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language 
:: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Requires-Python: >=3.6 networking-sfc-10.0.0/bindep.txt0000664000175000017500000000003213656750333016562 0ustar zuulzuul00000000000000libpq-dev [platform:dpkg] networking-sfc-10.0.0/networking_sfc/0000775000175000017500000000000013656750461017611 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/__init__.py0000664000175000017500000000123613656750333021722 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version __version__ = pbr.version.VersionInfo( 'networking_sfc').version_string() networking-sfc-10.0.0/networking_sfc/version.py0000664000175000017500000000126513656750333021652 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pbr.version version_info = pbr.version.VersionInfo('networking-sfc') networking-sfc-10.0.0/networking_sfc/cli/0000775000175000017500000000000013656750461020360 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/cli/flow_classifier.py0000664000175000017500000001670313656750333024112 0ustar zuulzuul00000000000000# Copyright (c) 2015 Huawei Technologies India Pvt.Limited. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutronclient.common import extension from neutronclient.common import utils from neutronclient.neutron import v2_0 as neutronv20 from networking_sfc._i18n import _ from networking_sfc.cli import port_pair as pp FLOW_CLASSIFIER_RESOURCE = 'flow_classifier' def get_flowclassifier_id(client, id_or_name): return neutronv20.find_resourceid_by_name_or_id(client, FLOW_CLASSIFIER_RESOURCE, id_or_name) class FlowClassifier(extension.NeutronClientExtension): resource = FLOW_CLASSIFIER_RESOURCE resource_plural = '%ss' % resource object_path = '/sfc/%s' % resource_plural resource_path = '/sfc/%s/%%s' % resource_plural versions = ['2.0'] class FlowClassifierCreate(extension.ClientExtensionCreate, FlowClassifier): """Create a Flow Classifier.""" shell_command = 'flow-classifier-create' def add_known_arguments(self, parser): parser.add_argument( 'name', metavar='NAME', help=_('Name of the Flow Classifier.')) parser.add_argument( '--description', help=_('Description for the Flow Classifier.')) parser.add_argument( '--protocol', 
help=_('IP protocol name. Protocol name should be as per ' 'IANA standard.')) parser.add_argument( '--ethertype', default='IPv4', choices=['IPv4', 'IPv6'], help=_('L2 ethertype, default is IPv4.')) parser.add_argument( '--source-port', help=_('Source protocol port (allowed range [1,65535]. Must be ' 'specified as a:b, where a=min-port and b=max-port.)')) parser.add_argument( '--destination-port', help=_('Destination protocol port (allowed range [1,65535]. Must ' 'be specified as a:b, where a=min-port and b=max-port.)')) parser.add_argument( '--source-ip-prefix', help=_('Source IP prefix or subnet.')) parser.add_argument( '--destination-ip-prefix', help=_('Destination IP prefix or subnet.')) parser.add_argument( '--logical-source-port', help=_('ID or name of the neutron source port.')) parser.add_argument( '--logical-destination-port', help=_('ID or name of the neutron destination port.')) parser.add_argument( '--l7-parameters', metavar='type=TYPE[,url=URL_PATH]', type=utils.str2dict, help=_('Dictionary of L7-parameters. 
Currently, no value is ' 'supported for this option.')) def args2body(self, parsed_args): body = {} client = self.get_client() if parsed_args.logical_source_port: body['logical_source_port'] = pp.get_port_id( client, parsed_args.logical_source_port) if parsed_args.logical_destination_port: body['logical_destination_port'] = pp.get_port_id( client, parsed_args.logical_destination_port) if parsed_args.source_port: self._fill_protocol_port_info(body, 'source', parsed_args.source_port) if parsed_args.destination_port: self._fill_protocol_port_info(body, 'destination', parsed_args.destination_port) neutronv20.update_dict(parsed_args, body, ['name', 'description', 'protocol', 'source_ip_prefix', 'destination_ip_prefix', 'ethertype', 'l7_parameters']) return {self.resource: body} def _fill_protocol_port_info(self, body, port_type, port_val): min_port, sep, max_port = port_val.partition(":") if not max_port: max_port = min_port body[port_type + '_port_range_min'] = int(min_port) body[port_type + '_port_range_max'] = int(max_port) class FlowClassifierUpdate(extension.ClientExtensionUpdate, FlowClassifier): """Update Flow Classifier information.""" shell_command = 'flow-classifier-update' def add_known_arguments(self, parser): parser.add_argument( '--name', metavar='NAME', help=_('Name of the Flow Classifier.')) parser.add_argument( '--description', help=_('Description for the Flow Classifier.')) def args2body(self, parsed_args): body = {} neutronv20.update_dict(parsed_args, body, ['name', 'description']) return {self.resource: body} class FlowClassifierDelete(extension.ClientExtensionDelete, FlowClassifier): """Delete a given Flow Classifier.""" shell_command = 'flow-classifier-delete' class FlowClassifierList(extension.ClientExtensionList, FlowClassifier): """List Flow Classifiers that belong to a given tenant.""" shell_command = 'flow-classifier-list' list_columns = ['id', 'name', 'summary'] pagination_support = True sorting_support = True def extend_list(self, data, 
parsed_args): for d in data: val = [] if d.get('protocol'): protocol = d['protocol'].upper() else: protocol = 'any' protocol = 'protocol: ' + protocol val.append(protocol) val.append(self._get_protocol_port_details(d, 'source')) val.append(self._get_protocol_port_details(d, 'destination')) if 'logical_source_port' in d: val.append('neutron_source_port: ' + str(d['logical_source_port'])) if 'logical_destination_port' in d: val.append('neutron_destination_port: ' + str(d['logical_destination_port'])) if 'l7_parameters' in d: l7_param = 'l7_parameters: {%s}' % ','.join(d['l7_parameters']) val.append(l7_param) d['summary'] = ',\n'.join(val) def _get_protocol_port_details(self, data, type): type_ip_prefix = type + '_ip_prefix' ip_prefix = data.get(type_ip_prefix) if not ip_prefix: ip_prefix = 'any' min_port = data.get(type + '_port_range_min') if min_port is None: min_port = 'any' max_port = data.get(type + '_port_range_max') if max_port is None: max_port = 'any' return '%s[port]: %s[%s:%s]' % ( type, ip_prefix, min_port, max_port) class FlowClassifierShow(extension.ClientExtensionShow, FlowClassifier): """Show information of a given Flow Classifier.""" shell_command = 'flow-classifier-show' networking-sfc-10.0.0/networking_sfc/cli/__init__.py0000664000175000017500000000000013656750333022455 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/cli/port_chain.py0000664000175000017500000001330213656750333023055 0ustar zuulzuul00000000000000# Copyright (c) 2015 Huawei Technologies India Pvt.Limited. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from neutronclient.common import extension from neutronclient.common import utils from neutronclient.neutron import v2_0 as neutronv20 from networking_sfc._i18n import _ from networking_sfc.cli import flow_classifier as fc from networking_sfc.cli import port_pair_group as ppg PORT_CHAIN_RESOURCE = 'port_chain' class PortChain(extension.NeutronClientExtension): resource = PORT_CHAIN_RESOURCE resource_plural = '%ss' % resource object_path = '/sfc/%s' % resource_plural resource_path = '/sfc/%s/%%s' % resource_plural versions = ['2.0'] class PortChainCreate(extension.ClientExtensionCreate, PortChain): """Create a Port Chain.""" shell_command = 'port-chain-create' def add_known_arguments(self, parser): parser.add_argument( 'name', metavar='NAME', help=_('Name of the Port Chain.')) parser.add_argument( '--description', help=_('Description for the Port Chain.')) parser.add_argument( '--port-pair-group', metavar='PORT-PAIR-GROUP', dest='port_pair_groups', default=[], required=True, action='append', help=_('ID or name of the Port Pair Group. ' 'This option can be repeated.')) parser.add_argument( '--flow-classifier', default=[], metavar='FLOW-CLASSIFIER', dest='flow_classifiers', action='append', help=_('ID or name of the Flow Classifier.' 'This option can be repeated.')) parser.add_argument( '--chain-parameters', metavar='[correlation=CORRELATION_TYPE, symmetric=BOOLEAN_TYPE]', type=utils.str2dict_type(optional_keys=['correlation', 'symmetric']), help=_('Dictionary of chain parameters. 
Supports ' 'correlation=mpls and symmetric=true|false.')) def args2body(self, parsed_args): body = {} client = self.get_client() if parsed_args.port_pair_groups: body['port_pair_groups'] = [ppg.get_port_pair_group_id(client, p) for p in parsed_args.port_pair_groups] if parsed_args.flow_classifiers: body['flow_classifiers'] = [fc.get_flowclassifier_id(client, f) for f in parsed_args.flow_classifiers] neutronv20.update_dict(parsed_args, body, ['name', 'description', 'chain_parameters']) return {self.resource: body} class PortChainUpdate(extension.ClientExtensionUpdate, PortChain): """Update Port Chain's information.""" shell_command = 'port-chain-update' def add_known_arguments(self, parser): parser.add_argument( '--name', metavar='NAME', help=_('Name of the Port Chain.')) parser.add_argument( '--description', help=_('Description for the Port Chain.')) fw_args = parser.add_mutually_exclusive_group() fw_args.add_argument( '--flow-classifier', metavar='FLOW-CLASSIFIER', dest='flow_classifiers', action='append', help=_('ID or name of the Flow Classifier. ' 'This option can be repeated.')) fw_args.add_argument( '--no-flow-classifier', action='store_true', help=_('Associate no Flow Classifier with the Port Chain.')) parser.add_argument( '--port-pair-group', metavar='PORT-PAIR-GROUP', dest='port_pair_groups', action='append', help=_('ID or name of the port pair group. 
' 'This option can be repeated.')) def args2body(self, parsed_args): body = {} client = self.get_client() if parsed_args.flow_classifiers: body['flow_classifiers'] = [fc.get_flowclassifier_id(client, f) for f in parsed_args.flow_classifiers] elif parsed_args.no_flow_classifier: body['flow_classifiers'] = [] if parsed_args.port_pair_groups: body['port_pair_groups'] = [ppg.get_port_pair_group_id(client, p) for p in parsed_args.port_pair_groups] neutronv20.update_dict(parsed_args, body, ['name', 'description']) return {self.resource: body} class PortChainDelete(extension.ClientExtensionDelete, PortChain): """Delete a given Port Chain.""" shell_command = 'port-chain-delete' class PortChainList(extension.ClientExtensionList, PortChain): """List Port Chains that belong to a given tenant.""" shell_command = 'port-chain-list' list_columns = ['id', 'name', 'port_pair_groups', 'flow_classifiers'] pagination_support = True sorting_support = True class PortChainShow(extension.ClientExtensionShow, PortChain): """Show information of a given Port Chain.""" shell_command = 'port-chain-show' networking-sfc-10.0.0/networking_sfc/cli/port_pair_group.py0000664000175000017500000002100713656750333024143 0ustar zuulzuul00000000000000# Copyright (c) 2015 Huawei Technologies India Pvt.Limited. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutronclient.common import extension from neutronclient.common import utils from neutronclient.neutron import v2_0 as neutronv20 from networking_sfc._i18n import _ from networking_sfc.cli import port_pair as pp PORT_PAIR_GROUP_RESOURCE = 'port_pair_group' def get_port_pair_group_id(client, id_or_name): return neutronv20.find_resourceid_by_name_or_id(client, PORT_PAIR_GROUP_RESOURCE, id_or_name) class PortPairGroup(extension.NeutronClientExtension): resource = PORT_PAIR_GROUP_RESOURCE resource_plural = '%ss' % resource object_path = '/sfc/%s' % resource_plural resource_path = '/sfc/%s/%%s' % resource_plural versions = ['2.0'] def add_common_arguments(parser): parser.add_argument( '--description', help=_('Description for the Port Pair Group.')) parser.add_argument( '--port-pair', metavar='PORT-PAIR', dest='port_pairs', default=[], action='append', help=_('ID or name of the Port Pair. ' 'This option can be repeated.')) def update_common_args2body(client, body, parsed_args): if parsed_args.port_pairs: body['port_pairs'] = [(pp.get_port_pair_id(client, pp1)) for pp1 in parsed_args.port_pairs] neutronv20.update_dict(parsed_args, body, ['name', 'description']) return body class PortPairGroupCreate(extension.ClientExtensionCreate, PortPairGroup): """Create a Port Pair Group.""" shell_command = 'port-pair-group-create' def add_known_arguments(self, parser): parser.add_argument( 'name', metavar='NAME', help=_('Name of the Port Pair Group.')) add_common_arguments(parser) parser.add_argument( '--port-pair-group-parameters', metavar='[lb_fields=LB_FIELDS, ppg_n_tuple_mapping=TUPLE_VALUES]', type=utils.str2dict_type(optional_keys=['lb_fields', 'ppg_n_tuple_mapping']), help=_('Dictionary of Port pair group parameters. ' 'Currently, only \'&\' separated string of the lb_fields ' 'and ppg_n_tuple_mapping are supported. For ' 'ppg_n_tuple_mapping the supported command is ' '\'key=value\' separated by \'&\'. 
Support ' 'ppg_n_tuple_mapping keys are: source_ip_prefix_ingress, ' 'source_ip_prefix_egress, destination_ip_prefix_ingress, ' 'destination_ip_prefix_egress, source_port_ingress, ' 'source_port_egress, destination_port_ingress, ' 'destination_port_egress.')) def args2body(self, parsed_args): body = {} if parsed_args.port_pair_group_parameters: body['port_pair_group_parameters'] = {} for key, value in parsed_args.port_pair_group_parameters.items(): # Setup lb_fields key and value(s) if key == 'lb_fields': body['port_pair_group_parameters'][key] = ([ field for field in value.split('&') if field]) # Setup ppg_n_tuple_mapping key(s) and value(s) elif key == 'ppg_n_tuple_mapping': # Reorganize ppg_n_tuple_mapping values in dict with # structure {'ppg_n_tuple_mapping': 'ingress_n_tuple': {}, # 'egress_n_tuple': {}} ppg_n_tuple_dict = {} ingress_n_tuple_dict = {} egress_n_tuple_dict = {} # Split input of ppg_n_tuple_mapping by & and = raw_data = dict([ (content[0], content[1]) for content in [sub_field.split('=') for sub_field in [field for field in value.split('&') if field]] ]) # Store ingress_n_tuple values and egress_n_tuple values # into corresponding dictionary, and expand # source_port_range and destination_port_range to # source_port_range_min, source_port_range_max, # destination_port_range_min, and # destination_port_range_max if exits for n_tuple_key, n_tuple_value in raw_data.items(): if n_tuple_key[-7:] == "ingress": n_tuple_key = n_tuple_key[:-8] if ( 'source_port' in n_tuple_key or 'destination_port' in n_tuple_key ): min_port, sep, max_port = \ n_tuple_value.partition(":") if not max_port: max_port = min_port ingress_n_tuple_dict[ n_tuple_key + '_range_min'] = int(min_port) ingress_n_tuple_dict[ n_tuple_key + '_range_max'] = int(max_port) else: ingress_n_tuple_dict[n_tuple_key] = \ n_tuple_value elif n_tuple_key[-6:] == "egress": n_tuple_key = n_tuple_key[:-7] if ( 'source_port' in n_tuple_key or 'destination_port' in n_tuple_key ): min_port, sep, 
max_port = \ n_tuple_value.partition(":") if not max_port: max_port = min_port egress_n_tuple_dict[ n_tuple_key + '_range_min'] = int(min_port) egress_n_tuple_dict[ n_tuple_key + '_range_max'] = int(max_port) else: egress_n_tuple_dict[n_tuple_key] = \ n_tuple_value ppg_n_tuple_dict['ingress_n_tuple'] = ingress_n_tuple_dict ppg_n_tuple_dict['egress_n_tuple'] = egress_n_tuple_dict body['port_pair_group_parameters'][key] = ppg_n_tuple_dict else: body['port_pair_group_parameters'][key] = value body = update_common_args2body(self.get_client(), body, parsed_args) return {self.resource: body} class PortPairGroupUpdate(extension.ClientExtensionUpdate, PortPairGroup): """Update Port Pair Group's information.""" shell_command = 'port-pair-group-update' def add_known_arguments(self, parser): parser.add_argument( '--name', metavar='NAME', help=_('Name of the Port Pair Group.')) add_common_arguments(parser) def args2body(self, parsed_args): body = {} body = update_common_args2body(self.get_client(), body, parsed_args) return {self.resource: body} class PortPairGroupDelete(extension.ClientExtensionDelete, PortPairGroup): """Delete a given Port Pair Group.""" shell_command = 'port-pair-group-delete' class PortPairGroupList(extension.ClientExtensionList, PortPairGroup): """List Port Pair Groups that belongs to a given tenant.""" shell_command = 'port-pair-group-list' list_columns = ['id', 'name', 'port_pairs'] pagination_support = True sorting_support = True class PortPairGroupShow(extension.ClientExtensionShow, PortPairGroup): """Show information of a given Port Pair Group.""" shell_command = 'port-pair-group-show' networking-sfc-10.0.0/networking_sfc/cli/port_pair.py0000664000175000017500000001116513656750333022733 0ustar zuulzuul00000000000000# Copyright (c) 2015 Huawei Technologies India Pvt.Limited. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutronclient.common import extension from neutronclient.common import utils from neutronclient.neutron import v2_0 as neutronv20 from networking_sfc._i18n import _ PORT_RESOURCE = 'port' PORT_PAIR_RESOURCE = 'port_pair' def get_port_id(client, id_or_name): return neutronv20.find_resourceid_by_name_or_id(client, PORT_RESOURCE, id_or_name) def get_port_pair_id(client, id_or_name): return neutronv20.find_resourceid_by_name_or_id(client, PORT_PAIR_RESOURCE, id_or_name) class PortPair(extension.NeutronClientExtension): resource = PORT_PAIR_RESOURCE resource_plural = '%ss' % resource object_path = '/sfc/%s' % resource_plural resource_path = '/sfc/%s/%%s' % resource_plural versions = ['2.0'] class PortPairCreate(extension.ClientExtensionCreate, PortPair): """Create a Port Pair.""" shell_command = 'port-pair-create' def add_known_arguments(self, parser): parser.add_argument( 'name', metavar='NAME', help=_('Name of the Port Pair.')) parser.add_argument( '--description', help=_('Description for the Port Pair.')) parser.add_argument( '--ingress', required=True, help=_('ID or name of the ingress neutron port.')) parser.add_argument( '--egress', required=True, help=_('ID or name of the egress neutron port.')) parser.add_argument( '--service-function-parameters', metavar='[correlation=CORRELATION_TYPE, weight=WEIGHT]', type=utils.str2dict_type(optional_keys=['correlation', 'weight']), help=_('Dictionary of Service function parameters. ' 'Currently, only correlation=None|mpls and weight ' 'is supported. Default correlation is None. 
Weight is ' 'an integer that influences the selection' 'of a port pair within a port pair group ' 'for a flow. The higher the weight, the more flows will ' 'hash to the port pair. The default weight is 1.')) def args2body(self, parsed_args): body = {} client = self.get_client() if parsed_args.ingress: body['ingress'] = get_port_id(client, parsed_args.ingress) if parsed_args.egress: body['egress'] = get_port_id(client, parsed_args.egress) neutronv20.update_dict(parsed_args, body, ['name', 'description', 'service_function_parameters']) return {self.resource: body} class PortPairUpdate(extension.ClientExtensionUpdate, PortPair): """Update Port Pair's information.""" shell_command = 'port-pair-update' def add_known_arguments(self, parser): parser.add_argument( '--name', metavar='NAME', help=_('Name of the Port Pair.')) parser.add_argument( '--description', help=_('Description for the Port Pair.')) def args2body(self, parsed_args): body = {} neutronv20.update_dict(parsed_args, body, ['name', 'description']) return {self.resource: body} class PortPairDelete(extension.ClientExtensionDelete, PortPair): """Delete a given Port Pair.""" shell_command = 'port-pair-delete' class PortPairList(extension.ClientExtensionList, PortPair): """List Port Pairs that belongs to a given tenant.""" shell_command = 'port-pair-list' list_columns = ['id', 'name', 'ingress', 'egress'] pagination_support = True sorting_support = True class PortPairShow(extension.ClientExtensionShow, PortPair): """Show information of a given Port Pair.""" shell_command = 'port-pair-show' networking-sfc-10.0.0/networking_sfc/services/0000775000175000017500000000000013656750461021434 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/services/__init__.py0000664000175000017500000000000013656750333023531 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/services/flowclassifier/0000775000175000017500000000000013656750461024450 5ustar 
zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/services/flowclassifier/__init__.py0000664000175000017500000000000013656750333026545 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/services/flowclassifier/drivers/0000775000175000017500000000000013656750461026126 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/services/flowclassifier/drivers/__init__.py0000664000175000017500000000000013656750333030223 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/services/flowclassifier/drivers/dummy/0000775000175000017500000000000013656750461027261 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/services/flowclassifier/drivers/dummy/__init__.py0000664000175000017500000000000013656750333031356 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/services/flowclassifier/drivers/dummy/dummy.py0000664000175000017500000000237213656750333030770 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import helpers as log_helpers

from networking_sfc.services.flowclassifier.drivers import base as fc_driver


class DummyDriver(fc_driver.FlowClassifierDriverBase):
    """No-op flow classifier driver.

    Implements every abstract hook of ``FlowClassifierDriverBase`` as a
    logged no-op; useful as the default driver and as a template for
    real back-end implementations.
    """

    def initialize(self):
        """Nothing to set up for the dummy back end."""
        pass

    @log_helpers.log_method_call
    def create_flow_classifier_precommit(self, context):
        """No extra validation inside the DB transaction."""
        pass

    @log_helpers.log_method_call
    def create_flow_classifier(self, context):
        """Ignore flow classifier creation."""
        pass

    @log_helpers.log_method_call
    def update_flow_classifier(self, context):
        """Ignore flow classifier updates."""
        pass

    @log_helpers.log_method_call
    def delete_flow_classifier(self, context):
        """Ignore flow classifier deletion."""
        pass


# ---- networking_sfc/services/flowclassifier/drivers/ovs/driver.py ----
# Copyright 2016 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import helpers as log_helpers

from networking_sfc.services.flowclassifier.common import exceptions as exc
from networking_sfc.services.flowclassifier.drivers import base as fc_driver


class OVSFlowClassifierDriver(fc_driver.FlowClassifierDriverBase):
    """Open vSwitch flow classifier driver.

    The create/update/delete hooks are no-ops here; the only OVS-specific
    behavior is the precommit validation below.
    (Original docstring said "Base Class" — a copy/paste slip.)
    """

    def initialize(self):
        pass

    @log_helpers.log_method_call
    def create_flow_classifier(self, context):
        pass

    @log_helpers.log_method_call
    def update_flow_classifier(self, context):
        pass

    @log_helpers.log_method_call
    def delete_flow_classifier(self, context):
        pass

    @log_helpers.log_method_call
    def create_flow_classifier_precommit(self, context):
        """OVS Driver precommit before transaction committed.

        Make sure the logical_source_port is not None.
        """
        # The OVS data path can only classify traffic that enters at a
        # known source port, so reject classifiers without one while the
        # DB transaction can still be rolled back.
        flow_classifier = context.current
        logical_source_port = flow_classifier['logical_source_port']
        if logical_source_port is None:
            raise exc.FlowClassifierBadRequest(message=(
                'FlowClassifier %s does not set '
                'logical source port in ovs driver' % flow_classifier['id']))


# ---- networking_sfc/services/flowclassifier/drivers/base.py ----
# Copyright 2015 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc

import six


@six.add_metaclass(abc.ABCMeta)
class FlowClassifierDriverBaseLegacy(object):
    """Flow Classifier Driver Base Class for legacy driver interface"""

    # Legacy drivers only implement the plain create/update hooks;
    # FlowClassifierDriverBase below layers the precommit/postcommit
    # split on top of them.

    @abc.abstractmethod
    def create_flow_classifier(self, context):
        pass

    @abc.abstractmethod
    def update_flow_classifier(self, context):
        pass


@six.add_metaclass(abc.ABCMeta)
class FlowClassifierDriverBase(FlowClassifierDriverBaseLegacy):
    """Flow Classifier Driver Base Class."""

    @abc.abstractmethod
    def create_flow_classifier_precommit(self, context):
        pass

    def create_flow_classifier_postcommit(self, context):
        # Default: delegate to the legacy create hook so existing
        # drivers keep working unchanged.
        self.create_flow_classifier(context)

    @abc.abstractmethod
    def delete_flow_classifier(self, context):
        pass

    def delete_flow_classifier_precommit(self, context):
        pass

    def delete_flow_classifier_postcommit(self, context):
        pass

    def update_flow_classifier_precommit(self, context):
        pass

    def update_flow_classifier_postcommit(self, context):
        # Default: delegate to the legacy update hook.
        self.update_flow_classifier(context)


# ---- networking_sfc/services/flowclassifier/common/config.py ----
# Copyright 2015 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg

from networking_sfc._i18n import _


# Options consumed by the flow classifier service plugin; registered
# under the [flowclassifier] section of the neutron configuration file.
FLOWCLASSIFIER_DRIVER_OPTS = [
    cfg.ListOpt('drivers',
                default=['dummy'],
                help=_("An ordered list of flow classifier drivers "
                       "entrypoints to be loaded from the "
                       "networking_sfc.flowclassifier.drivers namespace.")),
]


cfg.CONF.register_opts(FLOWCLASSIFIER_DRIVER_OPTS, "flowclassifier")


# ---- networking_sfc/services/flowclassifier/common/exceptions.py ----
"""Exceptions used by FlowClassifier plugin and drivers."""

from neutron_lib import exceptions

from networking_sfc._i18n import _


class FlowClassifierDriverError(exceptions.NeutronException):
    """flow classifier driver call failed."""
    # Raised by the driver manager when a driver hook raises and the
    # original exception is not propagated.
    message = _("%(method)s failed.")


class FlowClassifierException(exceptions.NeutronException):
    """Base for flow classifier driver exceptions returned to user."""
    pass


class FlowClassifierBadRequest(exceptions.BadRequest, FlowClassifierException):
    """Base for flow classifier driver bad request exceptions."""
    # The driver supplies the full text via the 'message' format argument.
    message = _("%(message)s")


# ---- networking_sfc/services/flowclassifier/common/context.py ----
# Copyright 2015 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class FlowClassifierPluginContext(object):
    """Flow Classifier context base class.

    Holds references to the service plugin and the request context so
    that drivers can reach back into the plugin API.  Drivers access
    these via the ``_plugin`` / ``_plugin_context`` attributes, so the
    attribute names are part of the de facto interface.
    """

    def __init__(self, plugin, plugin_context):
        self._plugin = plugin
        self._plugin_context = plugin_context


class FlowClassifierContext(FlowClassifierPluginContext):
    """Context wrapping one flow classifier and, on update, its prior state."""

    def __init__(self, plugin, plugin_context, flowclassifier,
                 original_flowclassifier=None):
        super(FlowClassifierContext, self).__init__(plugin, plugin_context)
        self._flowclassifier = flowclassifier
        self._original_flowclassifier = original_flowclassifier

    @property
    def original(self):
        """The pre-update flow classifier (None for create/delete)."""
        return self._original_flowclassifier

    @property
    def current(self):
        """The up-to-date flow classifier dict."""
        return self._flowclassifier


# ---- networking_sfc/services/flowclassifier/driver_manager.py ----
# Copyright 2015 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from stevedore.named import NamedExtensionManager

from networking_sfc.services.flowclassifier.common import exceptions as fc_exc

LOG = log.getLogger(__name__)

cfg.CONF.import_opt('drivers',
                    'networking_sfc.services.flowclassifier.common.config',
                    group='flowclassifier')


class FlowClassifierDriverManager(NamedExtensionManager):
    """Implementation of Flow Classifier drivers.

    Loads the configured drivers through stevedore and fans each
    plugin operation out to them in configuration order.
    """

    def __init__(self, namespace='networking_sfc.flowclassifier.drivers',
                 names=cfg.CONF.flowclassifier.drivers):
        # Registered flow classifier drivers, keyed by name.
        self.drivers = {}
        # Ordered list of flow classifier drivers, defining
        # the order in which the drivers are called.
        self.ordered_drivers = []
        LOG.info("Configured Flow Classifier drivers: %s", names)
        super(FlowClassifierDriverManager, self).__init__(
            namespace,
            names,
            invoke_on_load=True,
            name_order=True)
        LOG.info("Loaded Flow Classifier drivers: %s", self.names())
        self._register_drivers()

    @classmethod
    def make_test_instance(cls, extensions, namespace='TESTING'):
        """Construct a test FlowClassifierDriverManager

        Test instances are passed a list of extensions to use rather than
        loading them from entry points.

        :param extensions: Pre-configured Extension instances
        :type extensions: list of :class:`~stevedore.extension.Extension`
        :param namespace: The namespace for the manager; used only for
            identification since the extensions are passed in.
        :type namespace: str
        :return: The manager instance, initialized for testing
        """
        o = super(FlowClassifierDriverManager, cls).make_test_instance(
            extensions, namespace=namespace)
        o.drivers = {}
        o.ordered_drivers = []
        o._register_drivers()
        return o

    def _register_drivers(self):
        """Register all Flow Classifier drivers.

        This method should only be called once in the
        FlowClassifierDriverManager constructor.
        """
        for ext in self:
            self.drivers[ext.name] = ext
            self.ordered_drivers.append(ext)
        LOG.info("Registered Flow Classifier drivers: %s",
                 [driver.name for driver in self.ordered_drivers])

    def initialize(self):
        # ServiceChain bulk operations requires each driver to support them
        self.native_bulk_support = True
        for driver in self.ordered_drivers:
            LOG.info("Initializing Flow Classifier driver '%s'",
                     driver.name)
            driver.obj.initialize()
            # One driver without bulk support disables it for the manager.
            self.native_bulk_support &= getattr(driver.obj,
                                                'native_bulk_support', True)

    def _call_drivers(self, method_name, context, raise_orig_exc=False):
        """Helper method for calling a method across all drivers.

        :param method_name: name of the method to call
        :param context: context parameter to pass to each method call
        :param raise_orig_exc: whether or not to raise the original
            driver exception, or use a general one
        """
        for driver in self.ordered_drivers:
            try:
                getattr(driver.obj, method_name)(context)
            except Exception as e:
                # This is an internal failure.
                LOG.exception(e)
                LOG.error(
                    "Flow Classifier driver '%(name)s' "
                    "failed in %(method)s",
                    {'name': driver.name, 'method': method_name}
                )
                if raise_orig_exc:
                    raise
                else:
                    raise fc_exc.FlowClassifierDriverError(
                        method=method_name
                    )

    def create_flow_classifier_precommit(self, context):
        """Driver precommit before the db transaction committed."""
        # Propagate the original exception so precommit validation errors
        # (e.g. bad requests) reach the API caller unchanged.
        self._call_drivers("create_flow_classifier_precommit", context,
                           raise_orig_exc=True)

    def create_flow_classifier_postcommit(self, context):
        self._call_drivers("create_flow_classifier_postcommit", context)

    def update_flow_classifier_precommit(self, context):
        self._call_drivers("update_flow_classifier_precommit", context)

    def update_flow_classifier_postcommit(self, context):
        self._call_drivers("update_flow_classifier_postcommit", context)

    def delete_flow_classifier(self, context):
        self._call_drivers("delete_flow_classifier", context)

    def delete_flow_classifier_precommit(self, context):
        self._call_drivers("delete_flow_classifier_precommit", context)

    def delete_flow_classifier_postcommit(self, context):
        self._call_drivers("delete_flow_classifier_postcommit", context)


# ---- networking_sfc/services/flowclassifier/plugin.py ----
# Copyright 2015 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import excutils

from neutron_lib.db import api as db_api
from neutron_lib.plugins import directory

from networking_sfc.db import flowclassifier_db as fc_db
from networking_sfc.extensions import flowclassifier as fc_ext
from networking_sfc.services.flowclassifier.common import context as fc_ctx
from networking_sfc.services.flowclassifier.common import exceptions as fc_exc
from networking_sfc.services.flowclassifier import driver_manager as fc_driver

LOG = logging.getLogger(__name__)


class FlowClassifierPlugin(fc_db.FlowClassifierDbPlugin):
    """Implementation of the Plugin.

    Wraps the DB layer with driver precommit hooks (inside the DB
    transaction) and postcommit hooks (after commit), rolling back on
    driver failure where possible.
    """

    supported_extension_aliases = [fc_ext.FLOW_CLASSIFIER_EXT]
    path_prefix = fc_ext.FLOW_CLASSIFIER_PREFIX

    def __init__(self):
        self.driver_manager = fc_driver.FlowClassifierDriverManager()
        super(FlowClassifierPlugin, self).__init__()
        self.driver_manager.initialize()

    def _get_port(self, context, id):
        # Re-fetch through the core plugin so the returned dict carries
        # the full core-plugin view of the port.
        port = super(FlowClassifierPlugin, self)._get_port(context, id)
        return directory.get_plugin().get_port(context, port['id'])

    @log_helpers.log_method_call
    def create_flow_classifier(self, context, flow_classifier):
        with db_api.CONTEXT_WRITER.using(context):
            # NOTE: local 'fc_db' shadows the module-level fc_db import
            # inside this method.
            fc_db = super(FlowClassifierPlugin, self).create_flow_classifier(
                context, flow_classifier)
            fc_db_context = fc_ctx.FlowClassifierContext(self, context,
                                                         fc_db)
            self.driver_manager.create_flow_classifier_precommit(
                fc_db_context)
        try:
            self.driver_manager.create_flow_classifier_postcommit(
                fc_db_context)
        except fc_exc.FlowClassifierDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error("Create flow classifier failed, "
                          "deleting flow_classifier '%s'",
                          fc_db['id'])
                # Roll back the DB record created above; the original
                # driver error is re-raised afterwards.
                self.delete_flow_classifier(context, fc_db['id'])
        return fc_db

    @log_helpers.log_method_call
    def update_flow_classifier(self, context, id, flow_classifier):
        with db_api.CONTEXT_WRITER.using(context):
            original_flowclassifier = self.get_flow_classifier(context, id)
            updated_fc = super(
                FlowClassifierPlugin, self
            ).update_flow_classifier(
                context, id, flow_classifier)
            fc_db_context = fc_ctx.FlowClassifierContext(
                self, context, updated_fc,
                original_flowclassifier=original_flowclassifier)
            self.driver_manager.update_flow_classifier_precommit(fc_db_context)
        try:
            self.driver_manager.update_flow_classifier_postcommit(
                fc_db_context)
        except fc_exc.FlowClassifierDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                # No rollback on update failure: the DB change stays and
                # the error is surfaced to the caller.
                LOG.error("Update flow classifier failed, "
                          "flow_classifier '%s'",
                          updated_fc['id'])
        return updated_fc

    @log_helpers.log_method_call
    def delete_flow_classifier(self, context, fc_id):
        fc = self.get_flow_classifier(context, fc_id)
        fc_context = fc_ctx.FlowClassifierContext(self, context, fc)
        try:
            # Legacy single-phase driver hook runs before the DB delete.
            self.driver_manager.delete_flow_classifier(fc_context)
        except fc_exc.FlowClassifierDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error("Delete flow classifier failed, "
                          "flow_classifier '%s'", fc_id)

        with db_api.CONTEXT_WRITER.using(context):
            # Re-read inside the transaction so drivers see a fresh copy.
            fc = self.get_flow_classifier(context, fc_id)
            fc_context = fc_ctx.FlowClassifierContext(self, context, fc)
            super(FlowClassifierPlugin, self).delete_flow_classifier(
                context, fc_id)
            self.driver_manager.delete_flow_classifier_precommit(fc_context)
        self.driver_manager.delete_flow_classifier_postcommit(fc_context)


# ---- networking_sfc/services/sfc/drivers/dummy/dummy.py ----
# Copyright 2017 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import helpers as log_helpers

from networking_sfc.services.sfc.drivers import base as sfc_driver


class DummyDriver(sfc_driver.SfcDriverBase):
    """SFC Driver Dummy Class."""
    # Every hook below is a logged no-op.  The driver exists so the SFC
    # plugin can run without a real back end and serves as a template
    # for new SFC driver implementations.

    def initialize(self):
        pass

    @log_helpers.log_method_call
    def create_port_chain(self, context):
        pass

    @log_helpers.log_method_call
    def delete_port_chain(self, context):
        pass

    @log_helpers.log_method_call
    def update_port_chain(self, context):
        pass

    @log_helpers.log_method_call
    def create_port_chain_precommit(self, context):
        pass

    @log_helpers.log_method_call
    def create_port_pair_group(self, context):
        pass

    @log_helpers.log_method_call
    def delete_port_pair_group(self, context):
        pass

    @log_helpers.log_method_call
    def update_port_pair_group(self, context):
        pass

    @log_helpers.log_method_call
    def create_port_pair(self, context):
        pass

    @log_helpers.log_method_call
    def delete_port_pair(self, context):
        pass

    @log_helpers.log_method_call
    def update_port_pair(self, context):
        pass

    @log_helpers.log_method_call
    def create_service_graph_precommit(self, context):
        pass

    @log_helpers.log_method_call
    def create_service_graph_postcommit(self, context):
        pass

    @log_helpers.log_method_call
    def update_service_graph_precommit(self, context):
        pass

    @log_helpers.log_method_call
    def update_service_graph_postcommit(self, context):
        pass

    @log_helpers.log_method_call
    def delete_service_graph_precommit(self, context):
        pass

    @log_helpers.log_method_call
    def delete_service_graph_postcommit(self, context):
        pass


# ---- networking_sfc/services/sfc/drivers/ovs/rpc.py ----
# Copyright 2015 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
import oslo_messaging

from neutron_lib.agent import topics
from neutron_lib import rpc as n_rpc

from networking_sfc.services.sfc.drivers.ovs import rpc_topics as sfc_topics

LOG = logging.getLogger(__name__)


class SfcRpcCallback(object):
    """Sfc RPC server.

    Endpoint invoked by the OVS SFC agent over RPC; delegates to the
    OVS SFC driver instance passed at construction time.
    """

    def __init__(self, driver):
        self.target = oslo_messaging.Target(version='1.0')
        self.driver = driver

    def get_flowrules_by_host_portid(self, context, **kwargs):
        # Agent pull: return the flow rules for one port on one host.
        host = kwargs.get('host')
        port_id = kwargs.get('port_id')
        LOG.debug('from port-chain service plugin')
        pcfrs = self.driver.get_flowrules_by_host_portid(
            context, host, port_id)
        LOG.debug('host: %s, port_id: %s', host, port_id)
        return pcfrs

    def update_flowrules_status(self, context, **kwargs):
        # Agent push: record the status the agent reports for each rule.
        flowrules_status = kwargs.get('flowrules_status')
        LOG.info('update_flowrules_status: %s', flowrules_status)
        for flowrule_dict in flowrules_status:
            self.driver.update_flowrule_status(
                context, flowrule_dict['id'], flowrule_dict['status'])


class SfcAgentRpcClient(object):
    """RPC client for ovs sfc agent."""

    def __init__(self, topic=sfc_topics.SFC_AGENT):
        self.topic = topic
        target = oslo_messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(target)

    def ask_agent_to_update_flow_rules(self, context, flow_rule):
        LOG.debug('Ask agent on the specific host to update flows ')
        LOG.debug('flow_rule: %s', flow_rule)
        host = flow_rule.get('host')
        # Fire-and-forget cast addressed to the agent on the rule's host.
        cctxt = self.client.prepare(
            topic=topics.get_topic_name(
                self.topic, sfc_topics.PORTFLOW, topics.UPDATE),
            server=host)
        cctxt.cast(context, 'update_flow_rules', flowrule_entries=flow_rule)

    def ask_agent_to_delete_flow_rules(self, context, flow_rule):
        LOG.debug('Ask agent on the specific host to delete flows ')
        LOG.debug('flow_rule: %s', flow_rule)
        host = flow_rule.get('host')
        cctxt = self.client.prepare(
            topic=topics.get_topic_name(
                self.topic, sfc_topics.PORTFLOW, topics.DELETE),
            server=host)
        cctxt.cast(context, 'delete_flow_rules', flowrule_entries=flow_rule)


# ---- networking_sfc/services/sfc/drivers/ovs/driver.py ----
# Copyright 2017 Futurewei. All rights reserved.
# Copyright 2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import models_v2 import neutron.plugins.ml2.drivers.l2pop.db as l2pop_db import neutron.plugins.ml2.drivers.l2pop.rpc as l2pop_rpc from neutron_lib import constants as const from neutron_lib import context as n_context from neutron_lib.db import model_query from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_serialization import jsonutils from networking_sfc.extensions import flowclassifier from networking_sfc.extensions import sfc from networking_sfc.services.sfc.common import exceptions as exc from networking_sfc.services.sfc.drivers import base as driver_base from networking_sfc.services.sfc.drivers.ovs import constants as ovs_const from networking_sfc.services.sfc.drivers.ovs import db as ovs_sfc_db from networking_sfc.services.sfc.drivers.ovs import rpc as ovs_sfc_rpc from networking_sfc.services.sfc.drivers.ovs import rpc_topics as sfc_topics LOG = logging.getLogger(__name__) class OVSSfcDriver(driver_base.SfcDriverBase, ovs_sfc_db.OVSSfcDriverDB): """Sfc Driver Base Class.""" def initialize(self): super(OVSSfcDriver, self).initialize() self.ovs_driver_rpc = ovs_sfc_rpc.SfcAgentRpcClient( sfc_topics.SFC_AGENT ) self.rpc_ctx = n_context.get_admin_context_without_session() self._setup_rpc() def _setup_rpc(self): # Setup a rpc server self.topic = sfc_topics.SFC_PLUGIN self.endpoints = [ovs_sfc_rpc.SfcRpcCallback(self)] self.conn = n_rpc.Connection() self.conn.create_consumer(self.topic, self.endpoints, fanout=False) self.conn.consume_in_threads() def _get_port_infos(self, context, port, segment, agent_host): if not agent_host: return agent = l2pop_db.get_agent_by_host(context, agent_host) if not agent: return agent_ip = l2pop_db.get_agent_ip(agent) if not agent_ip: LOG.warning("Unable to retrieve the agent ip, check the agent " "configuration.") return if not segment: LOG.warning("Port %(port)s updated by agent %(agent)s " "isn't 
bound to any segment", {'port': port['id'], 'agent': agent}) return network_types = l2pop_db.get_agent_l2pop_network_types(agent) if network_types is None: network_types = l2pop_db.get_agent_tunnel_types(agent) if segment['network_type'] not in network_types: return fdb_entries = [l2pop_rpc.PortInfo(mac_address=port['mac_address'], ip_address=ip['ip_address']) for ip in port['fixed_ips']] return agent_ip, fdb_entries @log_helpers.log_method_call def _get_agent_fdb(self, context, port, segment, agent_host): agent_ip, port_fdb_entries = self._get_port_infos(context, port, segment, agent_host) if not port_fdb_entries: return network_id = port['network_id'] other_fdb_entries = {network_id: {'segment_id': segment['segmentation_id'], 'network_type': segment['network_type'], 'ports': {agent_ip: []}}} # Agent is removing its last activated port in this network, # other agents needs to be notified to delete their flooding entry. other_fdb_entries[network_id]['ports'][agent_ip].append( const.FLOODING_ENTRY) # Notify other agents to remove fdb rules for current port if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE: fdb_entries = port_fdb_entries other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries return other_fdb_entries @log_helpers.log_method_call def _get_remote_pop_ports(self, context, flow_rule): pop_ports = [] if not flow_rule.get('next_hops', None): return pop_ports pop_host = flow_rule['host_id'] core_plugin = directory.get_plugin() drivers = core_plugin.mechanism_manager.mech_drivers l2pop_driver = drivers.get('l2population', None) if l2pop_driver is None: return pop_ports for next_hop in flow_rule['next_hops']: agent_active_ports = \ l2pop_db.get_agent_network_active_port_count( self.admin_context, pop_host, next_hop['net_uuid']) segment = {} if agent_active_ports == 0: filters = {'network_id': [next_hop['net_uuid']], 'mac_address': [next_hop['mac_address']]} ports = core_plugin.get_ports(self.admin_context, filters=filters) if not ports: 
continue segment['network_type'] = next_hop['network_type'] segment['segmentation_id'] = next_hop['segment_id'] pop_ports.append((ports[0], segment)) return pop_ports @log_helpers.log_method_call def _get_network_other_active_entry_count(self, host, remote_port_id): agent_active_ports = 0 port_detail = self.get_port_detail_by_filter( dict(ingress=remote_port_id)) for assoc in port_detail['path_nodes']: node = self.get_path_node(assoc['pathnode_id']) if node['node_type'] != ovs_const.SRC_NODE: filter = dict(nsp=node['nsp'], nsi=node['nsi'] + 1) pre_node = self.get_path_node_by_filter(filter) if not pre_node: continue for each in pre_node['portpair_details']: pre_port = self.get_port_detail_by_filter(dict(id=each)) if host == pre_port['host_id']: agent_active_ports += 1 return agent_active_ports def _call_on_l2pop_driver(self, context, method_name, flow_rule): pop_host = flow_rule['host_id'] pop_ports = self._get_remote_pop_ports(context, flow_rule) for (port, segment) in pop_ports: port_id = port['id'] host_id = port['binding:host_id'] active_entry_count = self._get_network_other_active_entry_count( pop_host, port_id) if active_entry_count == 1: fdb_entry = self._get_agent_fdb( context, port, segment, host_id) getattr(l2pop_rpc.L2populationAgentNotifyAPI(), method_name)( self.rpc_ctx, fdb_entry, pop_host) def _update_agent_fdb_entries(self, context, flow_rule): self._call_on_l2pop_driver(context, "add_fdb_entries", flow_rule) def _delete_agent_fdb_entries(self, context, flow_rule): self._call_on_l2pop_driver(context, "remove_fdb_entries", flow_rule) def _get_subnet_by_port(self, id): core_plugin = directory.get_plugin() port = core_plugin.get_port(self.admin_context, id) subnet = None for ip in port['fixed_ips']: subnet = core_plugin.get_subnet(self.admin_context, ip["subnet_id"]) # currently only support one subnet for a port break return subnet @log_helpers.log_method_call def _get_portgroup_members(self, context, pg_id, fwd_path): next_group_members = [] ppg_obj 
= context._plugin._get_port_pair_group(context._plugin_context, pg_id) group_intid = ppg_obj['group_id'] tap_enabled = ppg_obj['tap_enabled'] LOG.debug('group_intid: %s', group_intid) pg = context._plugin.get_port_pair_group(context._plugin_context, pg_id) for pp_id in pg['port_pairs']: pp = context._plugin.get_port_pair(context._plugin_context, pp_id) filters = {} if pp.get('ingress', None): filters.update({'ingress': pp['ingress']}) if pp.get('egress', None): filters.update({'egress': pp['egress']}) pd = self.get_port_detail_by_filter(filters) if pd: next_group_members.append( dict(portpair_id=pd['id'], weight=1, tap_enabled=tap_enabled)) if fwd_path is False: next_group_members.reverse() return group_intid, next_group_members def _get_port_pair_detail_by_port_pair(self, context, port_pair_id): pp = context._plugin.get_port_pair(context._plugin_context, port_pair_id) filters = {} if pp.get('ingress', None): filters.update({'ingress': pp['ingress']}) if pp.get('egress', None): filters.update({'egress': pp['egress']}) pd = self.get_port_detail_by_filter(filters) return pd @log_helpers.log_method_call def _add_flowclassifier_port_assoc(self, fc_ids, project_id, src_node): for fc in self._get_fcs_by_ids(fc_ids): need_assoc = True # lookup the source port, when it's reverse path # set logical_destination_port to be source port if src_node['fwd_path'] is False: src_pd_filter = dict( egress=fc['logical_destination_port'], project_id=project_id, ) else: src_pd_filter = dict( egress=fc['logical_source_port'], project_id=project_id, ) src_pd = self.get_port_detail_by_filter(src_pd_filter) if not src_pd: # Create source port detail src_pd = self._create_port_pair_detail(src_pd_filter) LOG.debug('create src port detail: %s', src_pd) else: for path_node in src_pd['path_nodes']: if path_node['pathnode_id'] == src_node['id']: need_assoc = False if need_assoc: # Create associate relationship assco_args = { 'portpair_id': src_pd['id'], 'pathnode_id': src_node['id'], 'weight': 1, 
} sna = self.create_pathport_assoc(assco_args) LOG.debug('create assoc src port with node: %s', sna) src_node['portpair_details'].append(src_pd['id']) def _remove_flowclassifier_port_assoc(self, fc_ids, project_id, src_nodes): if not fc_ids: return for src_node in src_nodes: for fc in self._get_fcs_by_ids(fc_ids): # delete source port detail if src_node['fwd_path'] is False: src_pd_filter = dict( egress=fc['logical_destination_port'], project_id=project_id ) elif src_node['fwd_path']: src_pd_filter = dict( egress=fc['logical_source_port'], project_id=project_id ) pds = self.get_port_details_by_filter(src_pd_filter) if pds: for pd in pds: # update src_node portpair_details refence info if src_node and pd['id'] in src_node[ 'portpair_details' ]: self.delete_pathport_assoc(src_node['id'], pd['id']) src_node['portpair_details'].remove(pd['id']) # path_nodes is [] when passing from path_node # not delete any src_node in portpair_details table # why need to check len(pd['path_nodes'] # if len(pd['path_nodes']) == 1: self.delete_port_pair_detail(pd['id']) @log_helpers.log_method_call def _create_portchain_path(self, context, port_chain, fwd_path): path_nodes = [] # Create an assoc object for chain_id and path_id # context = context._plugin_context path_id = port_chain['chain_id'] if not path_id: LOG.error('No path_id available for creating port chain path') return port_pair_groups = port_chain['port_pair_groups'] sf_path_length = len(port_pair_groups) # Detect cross-subnet transit, returns ppg details ppg_details_list = self._validate_chain(context, port_chain, port_pair_groups, fwd_path) next_group_intid = None next_group_members = None previous_node_id = None # get the init and last port_pair_group if fwd_path: next_group_intid, next_group_members = self._get_portgroup_members( context, port_chain['port_pair_groups'][0], fwd_path) else: next_group_intid, next_group_members = self._get_portgroup_members( context, port_chain['port_pair_groups'][sf_path_length - 1], 
fwd_path) # Create a head node object for port chain src_args = {'project_id': port_chain['project_id'], 'node_type': ovs_const.SRC_NODE, 'nsp': path_id, 'nsi': 0xff, 'portchain_id': port_chain['id'], 'status': ovs_const.STATUS_BUILDING, 'next_group_id': next_group_intid, 'next_hop': jsonutils.dumps(next_group_members), 'fwd_path': fwd_path, 'ppg_n_tuple_mapping': None, 'tap_enabled': False } src_node = self.create_path_node(src_args) LOG.debug('create src node: %s', src_node) path_nodes.append(src_node) previous_node_id = src_node['id'] # Create a destination node object for port chain dst_args = { 'project_id': port_chain['project_id'], 'node_type': ovs_const.DST_NODE, 'nsp': path_id, 'nsi': 0xff - sf_path_length - 1, 'portchain_id': port_chain['id'], 'status': ovs_const.STATUS_BUILDING, 'next_group_id': None, 'next_hop': None, 'fwd_path': fwd_path, 'ppg_n_tuple_mapping': None, 'tap_enabled': False } dst_node = self.create_path_node(dst_args) LOG.debug('create dst node: %s', dst_node) # need to pass project_id here self._add_flowclassifier_port_assoc( port_chain['flow_classifiers'], port_chain['project_id'], src_node ) curr_group = context._plugin.get_port_pair_group( context._plugin_context, port_pair_groups[0]) for i in range(sf_path_length): cur_group_members = next_group_members # next_group for next hop if i < sf_path_length - 1: if fwd_path: next_group_intid, next_group_members = ( self._get_portgroup_members( context, port_pair_groups[i + 1], fwd_path) ) elif fwd_path is False: next_group_intid, next_group_members = ( self._get_portgroup_members( context, port_pair_groups[sf_path_length - 2 - i], fwd_path) ) else: next_group_intid = None next_group_members = None # Get current port_pair_group based on current port_pair_group id if i < sf_path_length: if fwd_path: curr_group = context._plugin.get_port_pair_group( context._plugin_context, port_pair_groups[i]) elif fwd_path is False: curr_group = context._plugin.get_port_pair_group( context._plugin_context, 
port_pair_groups[sf_path_length - 1 - i]) # Set curr_ppg_flag = 1, when current port_pair_group has # ppg_n_tuple_mapping dict in port_pair_group_parameters ppg_n_tuple_mapping = curr_group.get( 'port_pair_group_parameters', None) if ppg_n_tuple_mapping: ppg_n_tuple_mapping = ppg_n_tuple_mapping.get( 'ppg_n_tuple_mapping', None) if ppg_n_tuple_mapping: if ppg_n_tuple_mapping.get('ingress_n_tuple', None) or \ ppg_n_tuple_mapping.get('egress_n_tuple', None): ppg_n_tuple_mapping['curr_ppg_flag'] = 1 else: ppg_n_tuple_mapping = None # Create a node object node_args = { 'project_id': port_chain['project_id'], 'node_type': ovs_const.SF_NODE, 'nsp': path_id, 'nsi': 0xfe - i, 'portchain_id': port_chain['id'], 'status': ovs_const.STATUS_BUILDING, 'next_group_id': next_group_intid, 'next_hop': ( None if not next_group_members else jsonutils.dumps(next_group_members) ), 'fwd_path': fwd_path, 'ppg_n_tuple_mapping': ( None if not ppg_n_tuple_mapping else jsonutils.dumps(ppg_n_tuple_mapping)), 'tap_enabled': ppg_details_list[i]['tap_enabled'], 'previous_node_id': previous_node_id } sf_node = self.create_path_node(node_args) LOG.debug('chain path node: %s', sf_node) previous_node_id = sf_node['id'] # Create the assocation objects that combine the pathnode_id with # the ingress of the port_pairs in the current group # when port_group does not reach tail for member in cur_group_members: assco_args = {'portpair_id': member['portpair_id'], 'pathnode_id': sf_node['id'], 'weight': member['weight'], } sfna = self.create_pathport_assoc(assco_args) LOG.debug('create assoc port with node: %s', sfna) sf_node['portpair_details'].append(member['portpair_id']) path_nodes.append(sf_node) path_nodes = self._update_ppg_n_tuple_in_flow_rule( path_nodes, fwd_path, sf_path_length) dst_node = self.update_path_node( dst_node['id'], {'previous_node_id': previous_node_id}) path_nodes.append(dst_node) path_nodes = self._update_sc_path_parameter(path_nodes) return path_nodes # Function to update adjacent 
node ppg_n_tuple_mapping values if have # ppg_n_tuple_mapping in path_nodes def _update_ppg_n_tuple_in_flow_rule(self, path_nodes, fwd_path, sf_path_length): for index, node in enumerate(path_nodes): if not node['ppg_n_tuple_mapping']: # Update reverse SRC_NODE ppg_n_tuple_mapping based on last # forward SF_NODE ppg_n_tuple_mapping if ( node['node_type'] == ovs_const.SRC_NODE and fwd_path is False ): last_fwd_sf_node = self.get_path_node_by_filter( filters={'portchain_id': node['portchain_id'], 'nsi': 0xff - sf_path_length, 'fwd_path': True} ) if last_fwd_sf_node: if last_fwd_sf_node['ppg_n_tuple_mapping']: ppg_n_tuple_mapping = jsonutils.loads( last_fwd_sf_node['ppg_n_tuple_mapping']) if ( ppg_n_tuple_mapping.get( 'ingress_n_tuple', None) or ppg_n_tuple_mapping.get('egress_n_tuple', None) ): # Set curr_ppg_flag = 2 when # ppg_n_tuple_mapping inherits from # last_fwd_sf_node ppg_n_tuple_mapping['curr_ppg_flag'] = 2 node = self.update_path_node( node['id'], {'ppg_n_tuple_mapping': None if not ppg_n_tuple_mapping else jsonutils.dumps(ppg_n_tuple_mapping)} ) path_nodes[index] = node # Update SF_NODE ppg_n_tuple_mapping based on current # ppg_n_tuple_mapping and prev_node ppg_n_tuple_mapping elif node['node_type'] == ovs_const.SF_NODE: if node['nsi'] == 0xfe: prev_node = path_nodes[index - 2] else: prev_node = path_nodes[index - 1] if prev_node: if prev_node['ppg_n_tuple_mapping']: ppg_n_tuple_mapping = jsonutils.loads( prev_node['ppg_n_tuple_mapping']) # Set curr_ppg_flag = 2 when # ppg_n_tuple_mapping inherits from previous # sf_node. 
Set curr_ppg_flag = 3 when # ppg_n_tuple_mapping inherits from previous # sf_node, and fwd_path is False if ( ppg_n_tuple_mapping.get( 'ingress_n_tuple', None) or ppg_n_tuple_mapping.get('egress_n_tuple', None) ): if ( ppg_n_tuple_mapping[ 'curr_ppg_flag'] == 1 and fwd_path is False ): ppg_n_tuple_mapping['curr_ppg_flag'] = 3 else: ppg_n_tuple_mapping['curr_ppg_flag'] = 2 node = self.update_path_node( node['id'], {'ppg_n_tuple_mapping': None if not ppg_n_tuple_mapping else jsonutils.dumps(ppg_n_tuple_mapping)} ) path_nodes[index] = node return path_nodes def _delete_path_node_port_flowrule(self, context, node, port, pc_corr, fc_ids): # if this port is not binding, don't to generate flow rule if not port['host_id']: return flow_rule = self._build_portchain_flowrule_body(node, port, pc_corr, del_fc_ids=fc_ids) self.ovs_driver_rpc.ask_agent_to_delete_flow_rules(self.admin_context, flow_rule) self._delete_agent_fdb_entries(context, flow_rule) def _delete_path_node_flowrule(self, context, node, pc_corr, fc_ids): if node['portpair_details'] is None: return for each in node['portpair_details']: port = self.get_port_detail_by_filter(dict(id=each)) if port: self._delete_path_node_port_flowrule( context, node, port, pc_corr, fc_ids) @log_helpers.log_method_call def _delete_portchain_path(self, context, port_chain): pds = self.get_path_nodes_by_filter( dict(portchain_id=port_chain['id'])) src_nodes = [] if pds: for pd in pds: if pd['node_type'] == ovs_const.SRC_NODE: src_nodes.append(pd) pc_corr = port_chain['chain_parameters']['correlation'] updated_pd = self._update_tap_enabled_node(pd) self._delete_path_node_flowrule( context, updated_pd, pc_corr, port_chain['flow_classifiers'] ) self._build_tap_ingress_flow(pc_corr, updated_pd, pd['fwd_path'], delete=True) for pd in pds: self.delete_path_node(pd['id']) # delete the ports on the traffic classifier self._remove_flowclassifier_port_assoc( port_chain['flow_classifiers'], port_chain['project_id'], src_nodes ) def 
_update_path_node_next_hops(self, flow_rule):
        """Expand flow_rule['next_hop'] (JSON) into concrete hop details.

        Decodes the JSON-encoded list of next-hop members, resolves each
        member's port-pair detail record, and stores the resolved list in
        flow_rule['next_hops'] (replacing the raw 'next_hop' key).

        :param flow_rule: mutable flow-rule dict carrying a JSON string
            under 'next_hop' (or a falsy value when there is no next hop)
        :returns: the resolved list of hop-detail dicts, or None when
            there is no next hop to resolve
        """
        node_next_hops = []
        if not flow_rule['next_hop']:
            return None
        next_hops = jsonutils.loads(flow_rule['next_hop'])
        if not next_hops:
            return None
        core_plugin = directory.get_plugin()
        for member in next_hops:
            detail = {}
            port_detail = self.get_port_detail_by_filter(
                dict(id=member['portpair_id']))
            # Skip members whose port pair is unknown or not yet bound to
            # a host -- no flows can be programmed for them.
            if not port_detail or not port_detail['host_id']:
                continue
            detail['local_endpoint'] = port_detail['local_endpoint']
            detail['weight'] = member['weight']
            detail['mac_address'] = port_detail['mac_address']
            detail['in_mac_address'] = port_detail['in_mac_address']
            detail['segment_id'] = port_detail['segment_id']
            detail['network_type'] = port_detail['network_type']
            detail['pp_corr'] = port_detail['correlation']
            # Resolve the network the ingress port lives on.
            port = core_plugin.get_port(
                self.admin_context, port_detail['ingress'])
            detail['net_uuid'] = port['network_id']
            detail['tap_enabled'] = member['tap_enabled']
            if member['tap_enabled']:
                # Tap (passive) SFs need extra next-hop information.
                self._update_next_hop_details(flow_rule, port_detail, detail)
            node_next_hops.append(detail)
        # Replace the serialized 'next_hop' with the resolved 'next_hops'.
        flow_rule['next_hops'] = node_next_hops
        flow_rule.pop('next_hop')
        return node_next_hops

    # As of the "no-SFC-proxy" MPLS correlation support, pc_corr is passed.
    # pc_corr is expected to be the port-chain's correlation parameter, i.e.
    # the chain-wide SFC Encapsulation protocol. This is necessary to compare
    # with port-pairs' correlations and decide whether SFC Proxy is needed.
def _build_portchain_flowrule_body(self, node, port, pc_corr, add_fc_ids=None, del_fc_ids=None): node_info = node.copy() node_info.pop('project_id') node_info.pop('portpair_details') port_info = port.copy() port_info.pop('project_id') port_info.pop('id') port_info.pop('path_nodes') # port_info.pop('host_id') # change egress port in src_nodes if( node_info['fwd_path'] is False and node_info['node_type'] == ovs_const.SRC_NODE ): if add_fc_ids is not None: fcs = self._get_fcs_by_ids(add_fc_ids) elif del_fc_ids is not None: fcs = self._get_fcs_by_ids(del_fc_ids) for fc in fcs: if fc['logical_source_port'] == port_info['egress']: port_info['egress'] = fc['logical_destination_port'] flow_rule = dict(node_info, **port_info) flow_rule['pc_corr'] = pc_corr if node_info['node_type'] != ovs_const.SRC_NODE: flow_rule['pp_corr'] = port_info.get('correlation') else: # there's no correlation for src nodes flow_rule['pp_corr'] = None flow_rule.pop('correlation') # correlation becomes simply pp_corr # if this port belongs to an SFC Encapsulation-aware VM, # only notify the flow classifier for the 1st SF. flow_rule['add_fcs'] = self._filter_flow_classifiers( flow_rule, add_fc_ids) flow_rule['del_fcs'] = self._filter_flow_classifiers( flow_rule, del_fc_ids) self._update_portchain_group_reference_count(flow_rule, port['host_id']) # update next hop info self._update_path_node_next_hops(flow_rule) return flow_rule def _filter_flow_classifiers(self, flow_rule, fc_ids): """Filter flow classifiers. 
@return: list of the flow classifiers """ fc_return = [] if not fc_ids: return fc_return fcs = self._get_fcs_by_ids(fc_ids) for fc in fcs: new_fc = fc.copy() new_fc.pop('id') new_fc.pop('name') new_fc.pop('project_id') new_fc.pop('description') router_ints = const.ROUTER_INTERFACE_OWNERS logical_source_port = new_fc['logical_source_port'] if logical_source_port is not None: port_src = model_query.get_by_id( self.admin_context, models_v2.Port, logical_source_port ) if ( new_fc['source_ip_prefix'] is None and port_src['device_owner'] not in router_ints ): src_ips = port_src['fixed_ips'] # For now, only handle when the port has a single IP if len(src_ips) == 1: new_fc['source_ip_prefix'] = src_ips[0]['ip_address'] logical_destination_port = new_fc['logical_destination_port'] if logical_destination_port is not None: port_dst = model_query.get_by_id( self.admin_context, models_v2.Port, logical_destination_port ) if ( new_fc['destination_ip_prefix'] is None and port_dst['device_owner'] not in router_ints ): dst_ips = port_dst['fixed_ips'] # For now, only handle when the port has a single IP if len(dst_ips) == 1: new_fc['destination_ip_prefix'] = ( dst_ips[0]['ip_address'] ) # Update new_fc n tuple info based flow_rule['ppg_n_tuple_mapping'] # and flow_rule['fwd_path'] if flow_rule['ppg_n_tuple_mapping']: ppg_n_tuple_mapping = jsonutils.loads( flow_rule['ppg_n_tuple_mapping']) if ( flow_rule['fwd_path'] is False and ppg_n_tuple_mapping['curr_ppg_flag'] == 1 or ppg_n_tuple_mapping['curr_ppg_flag'] == 3 ): for ingress_key, ingress_value in \ ppg_n_tuple_mapping['ingress_n_tuple'].items(): new_fc[ingress_key] = ingress_value else: for egress_key, egress_value in \ ppg_n_tuple_mapping['egress_n_tuple'].items(): new_fc[egress_key] = egress_value if flow_rule['fwd_path'] is False: # swap logical_source_port & logical_destination_port new_fc['logical_source_port'] = fc['logical_destination_port'] new_fc['logical_destination_port'] = fc['logical_source_port'] if ( # add_flow & 
del_flow in flowrule pass into agent flow_rule['node_type'] == ovs_const.SRC_NODE and flow_rule['egress'] == new_fc['logical_source_port'] ): fc_return.append(new_fc) elif flow_rule['node_type'] == ovs_const.SF_NODE: fc_return.append(new_fc) return fc_return def _update_path_node_port_flowrules(self, context, node, port, pc_corr, add_fc_ids=None, del_fc_ids=None): # if this port is not binding, don't to generate flow rule if not port['host_id']: return flow_rule = self._build_portchain_flowrule_body(node, port, pc_corr, add_fc_ids=add_fc_ids, del_fc_ids=del_fc_ids) self.ovs_driver_rpc.ask_agent_to_update_flow_rules(self.admin_context, flow_rule) self._update_agent_fdb_entries(context, flow_rule) def _update_path_node_flowrules(self, context, node, pc_corr, add_fc_ids=None, del_fc_ids=None): if node['portpair_details'] is None: return updated_node = self._update_tap_enabled_node(node) for each in updated_node['portpair_details']: port = self.get_port_detail_by_filter(dict(id=each)) if port: self._update_path_node_port_flowrules( context, updated_node, port, pc_corr, add_fc_ids, del_fc_ids) self._build_tap_ingress_flow(pc_corr, updated_node, node['fwd_path']) def _update_path_nodes(self, context, nodes, pc_corr, add_fc_ids=None, del_fc_ids=None): for node in nodes: self._update_path_node_flowrules(context, node, pc_corr, add_fc_ids, del_fc_ids) def _get_portchain_fcs(self, port_chain): return self._get_fcs_by_ids(port_chain['flow_classifiers']) def _get_fcs_by_ids(self, fc_ids): flow_classifiers = [] if not fc_ids: return flow_classifiers # Get the portchain flow classifiers fc_plugin = ( directory.get_plugin(flowclassifier.FLOW_CLASSIFIER_EXT) ) if not fc_plugin: LOG.warning("Not found the flow classifier service plugin") return flow_classifiers for fc_id in fc_ids: fc = fc_plugin.get_flow_classifier(self.admin_context, fc_id) flow_classifiers.append(fc) return flow_classifiers @log_helpers.log_method_call def create_port_chain_precommit(self, context): """OVS 
Driver precommit before transaction committed. Make sure the logical_destination_port has been set when create symmetric port_chain """ port_chain = context.current symmetric = port_chain['chain_parameters'].get('symmetric') if symmetric: for fc in self._get_fcs_by_ids(port_chain['flow_classifiers']): if fc['logical_destination_port'] is None: raise exc.SfcBadRequest(message=( 'FlowClassifier %s does not set' 'logical_destination_port. logical_destination_port ' 'needed when symmetric has been set. Please recreate ' 'FlowClassifier with logical_destination_port and ' 'destination_ip_prefix.' % fc['id'] )) @log_helpers.log_method_call def create_port_chain(self, context): port_chain = context.current symmetric = port_chain['chain_parameters'].get('symmetric') if symmetric: fwd_path_nodes = self._create_portchain_path(context, port_chain, True) rev_path_nodes = self._create_portchain_path(context, port_chain, False) self._update_path_nodes( context, fwd_path_nodes, port_chain['chain_parameters']['correlation'], port_chain['flow_classifiers'], None) self._update_path_nodes( context, rev_path_nodes, port_chain['chain_parameters']['correlation'], port_chain['flow_classifiers'], None) elif symmetric is False: path_nodes = self._create_portchain_path(context, port_chain, True) self._update_path_nodes( context, path_nodes, port_chain['chain_parameters']['correlation'], port_chain['flow_classifiers'], None) @log_helpers.log_method_call def delete_port_chain(self, context): port_chain = context.current LOG.debug("to delete portchain path") self._delete_portchain_path(context, port_chain) def _get_diff_set(self, orig, cur): orig_set = set(item for item in orig) cur_set = set(item for item in cur) to_del = orig_set.difference(cur_set) to_add = cur_set.difference(orig_set) return to_del, to_add @log_helpers.log_method_call def update_port_chain(self, context): port_chain = context.current orig = context.original self._delete_portchain_path(context, orig) # recreate port_chain 
after delete the orig symmetric = port_chain['chain_parameters'].get('symmetric') if symmetric: fwd_path_nodes = self._create_portchain_path(context, port_chain, True) rev_path_nodes = self._create_portchain_path(context, port_chain, False) self._update_path_nodes( context, fwd_path_nodes, port_chain['chain_parameters']['correlation'], port_chain['flow_classifiers'], None) self._update_path_nodes( context, rev_path_nodes, port_chain['chain_parameters']['correlation'], port_chain['flow_classifiers'], None) elif symmetric is False: path_nodes = self._create_portchain_path(context, port_chain, True) self._update_path_nodes( context, path_nodes, port_chain['chain_parameters']['correlation'], port_chain['flow_classifiers'], None) @log_helpers.log_method_call def create_port_pair_group(self, context): pass @log_helpers.log_method_call def delete_port_pair_group(self, context): pass @log_helpers.log_method_call def update_port_pair_group(self, context): current = context.current original = context.original if set(current['port_pairs']) == set(original['port_pairs']): return # Update the path_nodes and flows for each port chain that # contains this port_pair_group # Note: _get_port_pair_group is temporarily used here. 
ppg_obj = context._plugin._get_port_pair_group(context._plugin_context, current['id']) port_chains = [assoc.portchain_id for assoc in ppg_obj.chain_group_associations] for chain_id in port_chains: pc = context._plugin.get_port_chain( context._plugin_context, chain_id) pc_corr = pc['chain_parameters']['correlation'] group_intid = current['group_id'] # Get the previous node prev_nodes = self.get_path_nodes_by_filter( filters={'portchain_id': chain_id, 'next_group_id': group_intid}) if not prev_nodes: continue for prev_node in prev_nodes: before_update_prev_node = prev_node.copy() # Update the previous node curr_group_intid, curr_group_members = \ self._get_portgroup_members( context, current['id'], prev_node['fwd_path']) prev_node['next_hop'] = ( jsonutils.dumps(curr_group_members) if curr_group_members else None ) # update next hop to database self.update_path_node(prev_node['id'], prev_node) self._delete_path_node_flowrule(context, before_update_prev_node, pc_corr, pc['flow_classifiers']) self._update_path_node_flowrules(context, prev_node, pc_corr, pc['flow_classifiers'], None) # Update the current node # to find the current node by using the node's next_group_id # if this node is the last, next_group_id would be None curr_pos = pc['port_pair_groups'].index(current['id']) curr_nodes = self.get_path_nodes_by_filter( filters={'portchain_id': chain_id, 'nsi': 0xfe - curr_pos}) if not curr_nodes: continue curr_node = None for temp_node in curr_nodes: if temp_node['fwd_path']: curr_node = temp_node rev_curr_pos = len(pc['port_pair_groups']) - 1 - curr_pos rev_curr_nodes = self.get_path_nodes_by_filter( filters={'portchain_id': chain_id, 'nsi': 0xfe - rev_curr_pos}) rev_curr_node = None if rev_curr_nodes is not None: for temp_node in rev_curr_nodes: if temp_node['fwd_path'] is False: rev_curr_node = temp_node # Add the port-pair-details into the current node for pp_id in ( set(current['port_pairs']) - set(original['port_pairs']) ): ppd = 
self._get_port_pair_detail_by_port_pair(context, pp_id) if not ppd: LOG.debug('No port_pair_detail for the port_pair: %s', pp_id) LOG.debug("Failed to update port-pair-group") return assco_args = {'portpair_id': ppd['id'], 'pathnode_id': curr_node['id'], 'weight': 1, } self.create_pathport_assoc(assco_args) updated_curr_node = self._update_tap_enabled_node(curr_node) self._update_path_node_port_flowrules( context, updated_curr_node, ppd, pc_corr, pc['flow_classifiers']) self._build_tap_ingress_flow(pc_corr=pc_corr, node=updated_curr_node, fwd_path=True) if not rev_curr_node: continue assco_args = {'portpair_id': ppd['id'], 'pathnode_id': rev_curr_node['id'], 'weight': 1, } self.create_pathport_assoc(assco_args) updated_rev_curr_node = self._update_tap_enabled_node( rev_curr_node) self._update_path_node_port_flowrules(context, updated_rev_curr_node, ppd, pc_corr, pc['flow_classifiers']) self._build_tap_ingress_flow(pc_corr=pc_corr, node=updated_rev_curr_node, fwd_path=False) # Delete the port-pair-details from the current node for pp_id in ( set(original['port_pairs']) - set(current['port_pairs']) ): ppd = self._get_port_pair_detail_by_port_pair(context, pp_id) if not ppd: LOG.debug('No port_pair_detail for the port_pair: %s', pp_id) LOG.debug("Failed to update port-pair-group") return updated_curr_node = self._update_tap_enabled_node(curr_node) self._delete_path_node_port_flowrule( context, updated_curr_node, ppd, pc_corr, pc['flow_classifiers']) self.delete_pathport_assoc(updated_curr_node['id'], ppd['id']) if not rev_curr_node: continue updated_rev_curr_node = self._update_tap_enabled_node( rev_curr_node) self._delete_path_node_port_flowrule( context, updated_rev_curr_node, ppd, pc_corr, pc['flow_classifiers']) self.delete_pathport_assoc(updated_rev_curr_node['id'], ppd['id']) @log_helpers.log_method_call def _get_port_detail_info(self, port_id): """Get port detail. 
@param: port_id: uuid @return: (host_id, local_ip, network_type, segment_id, service_insert_type): tuple """ core_plugin = directory.get_plugin() port_detail = core_plugin.get_port(self.admin_context, port_id) host_id, local_ip, network_type, segment_id, mac_address = ( (None, ) * 5) if port_detail: host_id = port_detail['binding:host_id'] network_id = port_detail['network_id'] mac_address = port_detail['mac_address'] network_info = core_plugin.get_network( self.admin_context, network_id) network_type = network_info['provider:network_type'] segment_id = network_info['provider:segmentation_id'] if network_type != const.TYPE_VXLAN: LOG.warning("Currently only support vxlan network") return ((None, ) * 5) elif not host_id: LOG.warning("This port has not been binding") return ((None, ) * 5) else: driver = core_plugin.type_manager.drivers.get(network_type) host_endpoint = driver.obj.get_endpoint_by_host(host_id) if host_endpoint: local_ip = host_endpoint['ip_address'] else: local_ip = None return host_id, local_ip, network_type, segment_id, mac_address @log_helpers.log_method_call def _create_port_pair_detail(self, port_pair): # since first node may not assign the ingress port, and last node # is not saved in the portpair_detail. we store the major egress port # info as the key to get the SF information. 
# mac_address stands for egress mac_address, and in_mac_address stands # for ingress_port mac_address in_port, e_port, host_id, local_endpoint, network_type, segment_id, \ mac_address, in_mac_address = ( (None, ) * 8) if port_pair.get('ingress', None): in_port = port_pair['ingress'] in_host_id, in_local_endpoint, in_network_type, in_segment_id, \ in_mac_address = ( self._get_port_detail_info(in_port)) if port_pair.get('egress', None): e_port = port_pair['egress'] host_id, local_endpoint, network_type, segment_id, mac_address = ( self._get_port_detail_info(e_port)) sfparams = port_pair.get('service_function_parameters') pp_corr = None if sfparams: pp_corr = sfparams.get('correlation') portpair_detail = { 'ingress': port_pair.get('ingress', None), 'egress': port_pair.get('egress', None), 'correlation': pp_corr, 'project_id': port_pair['project_id'], 'host_id': host_id, 'segment_id': segment_id, 'network_type': network_type, 'local_endpoint': local_endpoint, 'mac_address': mac_address, 'in_mac_address': in_mac_address } r = self.create_port_pair_detail(portpair_detail) LOG.debug('create port-pair detail: %s', r) return r @log_helpers.log_method_call def create_port_pair(self, context): port_pair = context.current self._create_port_pair_detail(port_pair) @log_helpers.log_method_call def delete_port_pair(self, context): port_pair = context.current pd_filter = dict(ingress=port_pair.get('ingress', None), egress=port_pair.get('egress', None), project_id=port_pair['project_id'] ) pds = self.get_port_details_by_filter(pd_filter) if pds: for pd in pds: self.delete_port_pair_detail(pd['id']) @log_helpers.log_method_call def update_port_pair(self, context): pass def get_flowrules_by_host_portid(self, context, host, port_id): port_chain_flowrules = [] sfc_plugin = directory.get_plugin(sfc.SFC_EXT) if not sfc_plugin: return port_chain_flowrules try: port_detail_list = [] # one port only may be in egress/ingress port once time. 
ingress_port = self.get_port_detail_by_filter( dict(ingress=port_id)) egress_port = self.get_port_detail_by_filter( dict(egress=port_id)) if not ingress_port and not egress_port: return None # SF migrate to other host if ingress_port: port_detail_list.append(ingress_port) if ingress_port['host_id'] != host: ingress_port.update(dict(host_id=host)) if egress_port: port_detail_list.append(egress_port) if egress_port['host_id'] != host: egress_port.update(dict(host_id=host)) # Get ingress flow for Tap SF if self._is_tap_ports(port_detail_list): return self._get_tap_port_ingress_flow(context, sfc_plugin, port_chain_flowrules, port_detail_list) # this is a SF if there are both egress and ingress. for i, port in enumerate(port_detail_list): nodes_assocs = port['path_nodes'] for assoc in nodes_assocs: # update current path flow rule node = self.get_path_node(assoc['pathnode_id']) port_chain = sfc_plugin.get_port_chain( context, node['portchain_id']) # if this path_node is a linking node of a graph, # then we obtain the respective flow_rule flow_rule = self._get_flow_rule_for_service_graph_node( context, node, port, port_chain) # otherwise it's a normal port chain flow_rule if not flow_rule: flow_rule = self._build_portchain_flowrule_body( node, port, port_chain['chain_parameters']['correlation'], add_fc_ids=port_chain['flow_classifiers'] ) port_chain_flowrules.append(flow_rule) self._get_egress_flowrule_if_next_hop_tap(sfc_plugin, context, port_chain_flowrules, port_detail_list) return port_chain_flowrules except Exception as e: LOG.exception(e) LOG.error("get_flowrules_by_host_portid failed") def _get_flow_rule_for_service_graph_node( self, context, node, port, port_chain): correlation = port_chain['chain_parameters']['correlation'] sfc_plugin = directory.get_plugin(sfc.SFC_EXT) if node['node_type'] == ovs_const.SRC_NODE: # check if port_chain is a dst_chain in a graph branches = sfc_plugin._get_branches( context, filters={'dst_chain': [port_chain['id']]}) if branches: 
matches = set() for branch in branches: src_chain = branch['src_chain'] # bnodes - "branching" nodes bnodes = self.get_path_nodes_by_filter( filters={ 'portchain_id': src_chain, 'node_type': ovs_const.SF_NODE, 'next_hops': None, 'next_group_ids': None}) # after filtering on the database, we still have to # filter on the singular next_group_id and next_hop by code bnodes = [x for x in bnodes if x[ 'next_group_id'] is None and x['next_hop'] is None] if not bnodes: LOG.error( '1 branching path node was expected, ' 'but none were found.') elif len(bnodes) != 1: LOG.error( '1 branching path node was expected, ' 'but %d were found.', len(bnodes)) nsp = bnodes[0]['nsp'] nsi = bnodes[0]['nsi'] matches.add((nsp, nsi,)) flow_rule = self._build_bare_flow_rule( node, port, correlation, correlation) add_fcs, del_fcs = self._build_fcs_from_fc_ids( flow_rule, port_chain['flow_classifiers']) return self._extend_sg_dst_chain_flow_rule( flow_rule, True, matches, add_fcs, []) elif (node['node_type'] == ovs_const.SF_NODE and node['next_hop'] is None and node['next_group_id'] is None): # check if port_chain is a src_chain in a graph branches = sfc_plugin._get_branches( context, filters={'src_chain': [port_chain['id']]}) if branches: flow_rule = self._build_bare_flow_rule( node, port, correlation, correlation) add_fcs, del_fcs = self._build_fcs_from_fc_ids( flow_rule, port_chain['flow_classifiers']) return self._extend_sg_src_chain_flow_rule( flow_rule, True, add_fcs, []) return None def _build_bare_flow_rule(self, node, port, pc_corr, pp_corr): node_info = node.copy() node_info.pop('project_id') node_info.pop('portpair_details') port_info = port.copy() port_info.pop('project_id') port_info.pop('id') port_info.pop('path_nodes') flow_rule = dict(node_info, **port_info) flow_rule['pc_corr'] = pc_corr flow_rule['pp_corr'] = pc_corr # correlation becomes simply pp_corr flow_rule.pop('correlation') return flow_rule def _build_fcs_from_fc_ids(self, flow_rule, fc_ids): fcs = 
self._filter_flow_classifiers(flow_rule, fc_ids) add_fcs = [] del_fcs = [] for fc in fcs: add_fcs.append(fc) del_fcs.append(fc) return add_fcs, del_fcs def _extend_sg_src_chain_flow_rule( self, flow_rule, branch_point, add_fcs, del_fcs): # trigger chain-linking flow_rule['branch_point'] = branch_point flow_rule['add_fcs'] = add_fcs flow_rule['del_fcs'] = del_fcs return flow_rule def _extend_sg_dst_chain_flow_rule( self, flow_rule, on_add, matches, add_fcs=None, del_fcs=None): if add_fcs is not None: flow_rule['add_fcs'] = add_fcs if del_fcs is not None: flow_rule['del_fcs'] = del_fcs flow_rule['branch_info'] = {} flow_rule['branch_info']['on_add'] = on_add flow_rule['branch_info']['matches'] = matches return flow_rule def update_flowrule_status(self, context, id, status): """FIXME drivers/ovs/db.py will be removed in the future with 4 ovs tables This function raise: RuntimeError: reentrant call DBError: reentrant call DBConnectionError: (pymysql.err.OperationalError) (2014, 'Command Out of Sync')) """ pass # try: # flowrule_status = dict(status=status) # self.update_path_node(id, flowrule_status) # except Exception as e: # LOG.exception(e) # LOG.error("update_flowrule_status failed") def _update_portchain_group_reference_count(self, flow_rule, host): group_refcnt = 0 flow_rule['host'] = host if flow_rule['next_group_id'] is not None: all_nodes = self.get_path_nodes_by_filter( filters={'next_group_id': flow_rule['next_group_id'], 'nsi': 0xff}) if all_nodes is not None: for node in all_nodes: if not node['portpair_details']: if flow_rule['fwd_path'] == node['fwd_path']: group_refcnt += 1 port_details = self.get_port_details_by_filter( dict(host_id=flow_rule['host'])) if port_details is not None: for pd in port_details: for path in pd['path_nodes']: path_node = self.get_path_node(path['pathnode_id']) if ( path_node['next_group_id'] == flow_rule['next_group_id'] and path_node['fwd_path'] ): group_refcnt += 1 flow_rule['group_refcnt'] = group_refcnt return group_refcnt 
    def _service_graph_linking_logic(self, context, create):
        """Link (create=True) or unlink (create=False) the port chains of a
        service graph.

        For every src chain -> dst chain edge in the graph: collect the
        (nsp, nsi) "branch match" of the source chain's last SF node, build
        branch flow rules on the destination chains' src nodes, and extend
        the source chain's last SF node with branch-point info. Agent calls
        for destination chains are postponed until all matches are known,
        because several source chains may "join" into the same dst chain.
        """
        service_graph = context.current
        # keep track of matches per dst chain (key)
        dc_branch_info = {}
        for src_chain_id in service_graph['port_chains']:
            sc_nodes = self.get_path_nodes_by_filter(
                dict(portchain_id=src_chain_id))
            # ascending nsi: sc_nodes[0] becomes the chain's tail node
            sc_nodes = sorted(sc_nodes, key=lambda n: n['nsi'])
            # dst_node is ignored when linking a chain to another chain
            if sc_nodes[0]['node_type'] == 'dst_node':
                del sc_nodes[0]
            # one of branch_info (it's for the current src chain),
            # sc_nodes[0] is simply the last sf_node of the src chain
            branch_match = (sc_nodes[0]['nsp'], sc_nodes[0]['nsi'],)
            branches = 0
            # for clarification: sc = source chain; dc = destination chain
            for dst_chain_id in service_graph['port_chains'][src_chain_id]:
                dc_nodes = self.get_path_nodes_by_filter(
                    dict(portchain_id=dst_chain_id))
                if dst_chain_id not in dc_branch_info:
                    dc_branch_info[dst_chain_id] = {}
                    dc_branch_info[dst_chain_id]['matches'] = []
                    dc_branch_info[dst_chain_id]['flow_rules'] = {}
                # trigger chain-linking using match of current src chain
                dc_branch_info[dst_chain_id]['matches'].append(branch_match)
                for node in dc_nodes:
                    if node['node_type'] == ovs_const.SRC_NODE:
                        dst_chain = context._plugin.get_port_chain(
                            context._plugin_context, dst_chain_id)
                        pc_corr = dst_chain[
                            'chain_parameters']['correlation']
                        new_fcs = dst_chain['flow_classifiers']
                        for each in node['portpair_details']:
                            port = self.get_port_detail_by_filter(
                                dict(id=each))
                            # merge node + port info into one flow rule dict
                            node_info = node.copy()
                            node_info.pop('project_id')
                            node_info.pop('portpair_details')
                            port_info = port.copy()
                            port_info.pop('project_id')
                            port_info.pop('id')
                            port_info.pop('path_nodes')
                            flow_rule = dict(node_info, **port_info)
                            flow_rule['pc_corr'] = pc_corr
                            # there's no correlation for src nodes
                            flow_rule['pp_corr'] = None
                            # correlation becomes simply pp_corr
                            flow_rule.pop('correlation')
                            old_fcs = self._filter_flow_classifiers(
                                flow_rule, new_fcs)
                            for old_fc in old_fcs:
                                new_fc = old_fc.copy()
                                if create:
                                    # dependent PCs should ignore LSPs
                                    new_fc['logical_source_port'] = None
                                else:
                                    old_fc['logical_source_port'] = None
                                if 'del_fcs' not in flow_rule:
                                    flow_rule['del_fcs'] = []
                                if 'add_fcs' not in flow_rule:
                                    flow_rule['add_fcs'] = []
                                flow_rule['del_fcs'].append(old_fc)
                                flow_rule['add_fcs'].append(new_fc)
                            # update next hop info
                            self._update_path_node_next_hops(flow_rule)
                            # usually we would ask_agent_to_update_flow_rules
                            # here but "joining" branches require postponing
                            dc_branch_info[
                                dst_chain_id]['flow_rules'][each] = flow_rule
                branches = branches + 1
            if branches > 0:  # this is supposed to always be true
                node = sc_nodes[0]
                if node['node_type'] == ovs_const.SF_NODE:
                    src_chain = context._plugin.get_port_chain(
                        context._plugin_context, src_chain_id)
                    fc_ids = src_chain['flow_classifiers']
                    pc_corr = src_chain['chain_parameters']['correlation']
                    for each in node['portpair_details']:
                        port = self.get_port_detail_by_filter(dict(id=each))
                        node_info = node.copy()
                        port_info = port.copy()
                        flow_rule = self._build_bare_flow_rule(
                            node_info, port_info, pc_corr, pc_corr)
                        add_fcs, del_fcs = self._build_fcs_from_fc_ids(
                            flow_rule, fc_ids)
                        # branch_point=create: set on link, cleared on unlink
                        flow_rule = self._extend_sg_src_chain_flow_rule(
                            flow_rule, create, add_fcs, del_fcs)
                        # update next hop info
                        self._update_path_node_next_hops(flow_rule)
                        # call agent
                        self.ovs_driver_rpc.ask_agent_to_update_flow_rules(
                            self.admin_context, flow_rule)
                        self._update_agent_fdb_entries(context, flow_rule)
            else:
                LOG.warning('Expected branch point did not get any branches.')
        # at this point we know about all chain links/matches
        # (including "joining" branches), so let's call the agent
        for dst_chain_id in dc_branch_info:
            branch_matches = dc_branch_info[dst_chain_id]['matches']
            flow_rules = dc_branch_info[dst_chain_id]['flow_rules']
            for _, flow_rule in flow_rules.items():
                flow_rule = self._extend_sg_dst_chain_flow_rule(
                    flow_rule, create, branch_matches)
                # call agent
                self.ovs_driver_rpc.ask_agent_to_update_flow_rules(
                    self.admin_context, flow_rule)
                self._update_agent_fdb_entries(context, flow_rule)
    @log_helpers.log_method_call
    def create_service_graph_precommit(self, context):
        # No precommit validation needed for graph creation in this driver.
        pass

    @log_helpers.log_method_call
    def create_service_graph_postcommit(self, context):
        # Link the graph's chains (create=True).
        self._service_graph_linking_logic(context, True)

    @log_helpers.log_method_call
    def update_service_graph_precommit(self, context):
        pass

    @log_helpers.log_method_call
    def update_service_graph_postcommit(self, context):
        pass

    @log_helpers.log_method_call
    def delete_service_graph_precommit(self, context):
        pass

    @log_helpers.log_method_call
    def delete_service_graph_postcommit(self, context):
        # Unlink the graph's chains (create=False).
        self._service_graph_linking_logic(context, False)

    def _get_tap_port_ingress_flow(self, context, sfc_plugin,
                                   port_chain_flowrules, port_detail_list):
        """Append the ingress flow rules of every tap-enabled path node
        associated with the given port details; returns the extended list.
        """
        for i, ports in enumerate(port_detail_list):
            nodes_assocs = ports['path_nodes']
            for assoc in nodes_assocs:
                node = self.get_path_node(assoc['pathnode_id'])
                updated_node = self._update_tap_enabled_node(node)
                port_chain = sfc_plugin.get_port_chain(
                    context, node['portchain_id'])
                # configure=False: collect rules instead of pushing them
                flowrules = self._build_tap_ingress_flow(
                    port_chain['chain_parameters']['correlation'],
                    updated_node, node['fwd_path'], configure=False)
                port_chain_flowrules.extend(flowrules)
        return port_chain_flowrules

    def _get_egress_flowrule_if_next_hop_tap(self, sfc_plugin, context,
                                             port_chain_flowrules,
                                             port_detail_list):
        # Get the flow for Tap egress port.
        for i, ports in enumerate(port_detail_list):
            nodes_assocs = ports['path_nodes']
            for assoc in nodes_assocs:
                node = self.get_path_node(assoc['pathnode_id'])
                if not node['next_hop']:
                    # NOTE(review): this `return` (and the one below) aborts
                    # the whole scan, skipping any remaining ports/assocs —
                    # looks like it may have been meant as `continue`;
                    # confirm intent before changing.
                    return
                next_hops = jsonutils.loads(node['next_hop'])
                for next_hop in next_hops:
                    if not next_hop['tap_enabled']:
                        return
                    tap_pp = self.get_port_detail_by_filter(
                        dict(id=next_hop['portpair_id']))
                    for t_node in tap_pp['path_nodes']:
                        tap_node = self.get_path_node(t_node['pathnode_id'])
                        tap_node = self._update_tap_enabled_node(tap_node)
                        # only build the rule for the matching direction
                        if tap_node['fwd_path'] == node['fwd_path']:
                            port_chain = sfc_plugin.get_port_chain(
                                context, tap_node['portchain_id'])
                            flowrule = self._build_portchain_flowrule_body(
                                tap_node,
                                ports,
                                port_chain['chain_parameters']['correlation'],
                                add_fc_ids=port_chain['flow_classifiers']
                            )
                            port_chain_flowrules.append(flowrule)

    def _validate_chain(self, context, port_chain, port_pair_groups,
                        fwd_path):
        """Validate the cross-subnet restriction

        :param context:
        :param port_chain:
        :param port_pair_groups:
        :return: the PPG list, reversed when fwd_path is False
        """
        sf_path_length = len(port_pair_groups)
        ppgs = [context._plugin.get_port_pair_group(context._plugin_context,
                                                    ppg)
                for ppg in port_pair_groups]
        # Detect cross-subnet transit
        # Compare subnets for logical source ports
        # and first PPG ingress ports
        for fc in self._get_fcs_by_ids(port_chain['flow_classifiers']):
            subnet1 = self._get_subnet_by_port(fc['logical_source_port'])
            cidr1 = subnet1['cidr']
            ppg = ppgs[0]
            for pp_id1 in ppg['port_pairs']:
                pp1 = context._plugin.get_port_pair(context._plugin_context,
                                                    pp_id1)
                filter1 = {}
                if pp1.get('ingress', None):
                    filter1 = dict(dict(ingress=pp1['ingress']), **filter1)
                    pd1 = self.get_port_detail_by_filter(filter1)
                    subnet2 = self._get_subnet_by_port(pd1['ingress'])
                    cidr2 = subnet2['cidr']
                    if cidr1 != cidr2:
                        LOG.error('Cross-subnet chain not supported')
                        raise exc.SfcDriverError(
                            method='create_portchain_path')
        # Compare subnets for PPG egress ports
        # and next PPG ingress ports
        for i in range(sf_path_length - 1):
            ppg = ppgs[i]
            next_ppg = ppgs[i + 1]
            for pp_id1 in ppg['port_pairs']:
                pp1 = context._plugin.get_port_pair(context._plugin_context,
                                                    pp_id1)
                filter1 = {}
                # cidr3 stays None when the port pair has no egress port
                cidr3 = None
                if pp1.get('egress', None):
                    filter1 = dict(dict(egress=pp1['egress']), **filter1)
                    pd1 = self.get_port_detail_by_filter(filter1)
                    subnet1 = self._get_subnet_by_port(pd1['egress'])
                    cidr3 = subnet1['cidr']
                for pp_id2 in next_ppg['port_pairs']:
                    pp2 = context._plugin.get_port_pair(
                        context._plugin_context, pp_id2)
                    filter2 = {}
                    if pp2.get('ingress', None):
                        filter2 = dict(dict(ingress=pp2['ingress']),
                                       **filter2)
                        pd2 = self.get_port_detail_by_filter(filter2)
                        subnet2 = self._get_subnet_by_port(pd2['ingress'])
                        cidr4 = subnet2['cidr']
                        if cidr3 != cidr4:
                            LOG.error('Cross-subnet chain not supported')
                            raise exc.SfcDriverError(
                                method='create_portchain_path')
        # reverse the PPG order for the reverse path
        ppgs = ppgs if fwd_path else list(reversed(ppgs))
        return ppgs

    def _update_tap_enabled_node(self, node):
        """Return a copy of a tap-enabled node rebased onto its previous
        node (id/nsi/nsp/port pairs/node type), keeping references to the
        original tap node; non-tap nodes are returned unchanged.
        """
        if not node['tap_enabled'] or not node['previous_node_id']:
            return node
        prev_node = self.get_path_node(node['previous_node_id'])
        node_copy = node.copy()
        node_copy['prev_node'] = prev_node
        node_copy['tap_node'] = node
        node_copy['tap_pp_corr'] = self.get_port_detail(
            node['portpair_details'][0])['correlation']
        node_copy.update(id=prev_node['id'],
                         nsi=prev_node['nsi'],
                         nsp=prev_node['nsp'],
                         portpair_details=prev_node['portpair_details'],
                         node_type=prev_node['node_type'],
                         skip_ingress_flow_config=True)  # Flags driver to
        # skip ingress flow configuration.
        return node_copy

    def _build_tap_ingress_flow(self, pc_corr, node, fwd_path, delete=False,
                                configure=True):
        """Build add/delete ingress flows of Tap node.

        :param pc_corr:
        :param node:
        :param fwd_path:
        :param delete:
        :param configure:
        :return:
        """
        if not node['tap_enabled']:
            return
        flowrule_list = list()
        # 'fwd_path' is always set to true. Call from this method to OVS
        # agent is made to either create/delete ingress flows explicitly.
        # In ref. sfc_driver.py swap ingress & egress flows if fwd_path set
        # to False, which is not required for Tap SF's ingress flows.
        flowrule = {'nsp': node['tap_node']['nsp'],
                    'nsi': node['tap_node']['nsi'],
                    'node_nsp': node['nsp'],
                    'node_nsi': node['nsi'],
                    'pc_corr': pc_corr,
                    'pp_corr': node['tap_pp_corr'],
                    'pp_corr_tap_nh': None,
                    'id': node['id'],
                    'fwd_path': True,
                    'node_type': node['node_type'],
                    'tap_nh_node_type': None,
                    'portchain_id': node['portchain_id'],
                    }
        if node['tap_node']['next_hop']:
            # propagate the next hop's correlation to the tap rule
            next_hop = jsonutils.loads(node['tap_node']['next_hop'])
            nh_pp = self.get_port_detail(next_hop[0]['portpair_id'])
            flowrule.update(pp_corr_tap_nh=nh_pp['correlation'],
                            tap_nh_node_type=ovs_const.SF_NODE)
        for each in node['prev_node']['portpair_details']:
            port = self.get_port_detail_by_filter(dict(id=each))
            if not port:
                continue
            if fwd_path:
                flowrule.update(mac_address=port['mac_address'])
            else:
                # reverse path prefers the ingress-side MAC when present
                flowrule.update(
                    mac_address=(port['in_mac_address'] or
                                 port['mac_address']))
            for pp_id in node['tap_node']['portpair_details']:
                _port = self.get_port_detail_by_filter(dict(id=pp_id))
                if _port:
                    # when Tap is launched with single port
                    ingress_port = _port['ingress'] if fwd_path else (
                        _port['egress'] or _port['ingress'])
                    flowrule.update(egress=None,
                                    ingress=ingress_port,
                                    host=_port['host_id'],
                                    pp_corr=_port['correlation'],
                                    tap_enabled=True)
                    flowrule_list.append(flowrule.copy())
        if configure:
            self._update_tap_ingress_flow_rules(flowrule_list, delete)
        else:
            return flowrule_list

    def _update_tap_ingress_flow_rules(self, flowrule_list, delete):
        """Push (or withdraw, when delete=True) the given tap ingress flow
        rules to the agent over RPC.
        """
        for flowrule in flowrule_list:
            if not delete:
                self.ovs_driver_rpc.ask_agent_to_update_flow_rules(
                    self.admin_context, flowrule)
            else:
                self.ovs_driver_rpc.ask_agent_to_delete_flow_rules(
                    self.admin_context, flowrule
                )

    def _is_tap_ports(self, port_detail_list):
        """Return True if any path node behind these port details is
        tap-enabled.
        """
        for port in port_detail_list:
            nodes_assocs = port['path_nodes']
            for assoc in nodes_assocs:
                node = self.get_path_node(assoc['pathnode_id'])
                if node['tap_enabled']:
                    return True
        return False

    def _update_sc_path_parameter(self, nodes):
        # This method updates 'nsi' of nodes in service chain. 'nsi' is
        # assigned to nodes in decreasing order. For eg., if the nodes are
        # like: SRC_NODE -> TAP_NODE -> DEFAULT_NODE -> DST_NODE - 'nsi' for
        # corresponding nodes will be - 255 -> 254 -> 253 -> 252 , where 254
        # is of TAP_NODE. In this method, TAP_NODES 'nsi' are re-assigned
        # from the DST_NODE onward, for eg. the new TAP_NODE 'nsi' would be
        # 252 instead of 254, the resultant chain's 'nsi' - SRC_NODE (255) ->
        # TAP_NODE (252) -> DEFAULT_NODE (254) -> DST_NODE (253) . This was
        # done to simplify the OVS driver configuration, with the various
        # combination of correlation parameters of port-pairs.
        tap_sf_in_chain = False
        def_nodes, tap_nodes = list(), list()
        # order nodes by path position: index = 0xff - nsi
        node_list = [None] * len(nodes)
        for node in nodes:
            node_list[0xff - node['nsi']] = node
        for node in node_list:
            if node['tap_enabled']:
                tap_sf_in_chain = True
                tap_nodes.append(node)
            else:
                def_nodes.append(node)
        if not tap_sf_in_chain:
            # nothing to renumber
            return nodes
        else:
            # default nodes keep a contiguous descending nsi range ...
            node_list = [def_nodes[0]]
            for i, node in enumerate(def_nodes):
                if i > 0:
                    node_list.append(self.update_path_node(
                        node['id'],
                        dict(nsi=def_nodes[0]['nsi'] - i)))
            # ... and tap nodes are renumbered after them, ascending
            tap_nsi = def_nodes[0]['nsi'] - len(def_nodes)
            for node in tap_nodes:
                node_list.append(self.update_path_node(node['id'],
                                                       dict(nsi=tap_nsi)))
                tap_nsi += 1
            return node_list

    def _update_next_hop_details(self, flow_rule, port_detail, detail):
        # Update detail with next-hop node_type and the correlation of
        # port-pair of next-hop
        for node in port_detail['path_nodes']:
            _node = self.get_path_node(node['pathnode_id'])
            if flow_rule['fwd_path'] == _node['fwd_path']:
                detail['nsi'], detail['nsp'] = _node['nsi'], _node['nsp']
                if _node['next_hop']:
                    next_hops = jsonutils.loads(_node['next_hop'])
                    for next_hop in next_hops:
                        pp = self.get_port_detail(next_hop['portpair_id'])
                        detail['pp_corr_tap_nh'] = pp['correlation']
                        for pp_node in pp['path_nodes']:
                            pp_node_detail = self.get_path_node(
                                pp_node['pathnode_id'])
                            detail['tap_nh_node_type'] = pp_node_detail[
                                'node_type']
        return
networking-sfc-10.0.0/networking_sfc/services/sfc/drivers/ovs/db.py0000664000175000017500000003412513656750333025436 0ustar zuulzuul00000000000000
# Copyright 2017 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from neutron_lib import context as n_context
from neutron_lib.db import api as db_api
from neutron_lib.db import model_base
from neutron_lib.db import model_query
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy import sql

from oslo_utils import uuidutils

from networking_sfc._i18n import _


class PortPairDetailNotFound(n_exc.NotFound):
    # Raised when a PortPairDetail row lookup by id fails.
    message = _("Portchain port brief %(port_id)s could not be found")


class NodeNotFound(n_exc.NotFound):
    # Raised when a PathNode row lookup by id fails.
    message = _("Portchain node %(node_id)s could not be found")


# name changed to ChainPathId
class UuidIntidAssoc(model_base.BASEV2, model_base.HasId):
    """Maps a resource UUID to a small unique integer id (e.g. path id)."""
    __tablename__ = 'sfc_uuid_intid_associations'
    uuid = sa.Column(sa.String(36), primary_key=True)
    intid = sa.Column(sa.Integer, unique=True, nullable=False)
    type_ = sa.Column(sa.String(32), nullable=False)

    def __init__(self, uuid, intid, type_):
        self.uuid = uuid
        self.intid = intid
        self.type_ = type_


def singleton(class_):
    """Class decorator: always return the same (first-constructed)
    instance of the decorated class.
    """
    instances = {}

    def getinstance(*args, **kwargs):
        if class_ not in instances:
            instances[class_] = class_(*args, **kwargs)
        return instances[class_]
    return getinstance


class PathPortAssoc(model_base.BASEV2):
    """path port association table.

    It represents the association table which associate path_nodes with
    portpair_details.
    """
    __tablename__ = 'sfc_path_port_associations'
    pathnode_id = sa.Column(sa.String(36),
                            sa.ForeignKey('sfc_path_nodes.id',
                                          ondelete='CASCADE'),
                            primary_key=True)
    portpair_id = sa.Column(sa.String(36),
                            sa.ForeignKey('sfc_portpair_details.id',
                                          ondelete='CASCADE'),
                            primary_key=True)
    weight = sa.Column(sa.Integer, nullable=False, default=1)


class PortPairDetail(model_base.BASEV2, model_base.HasId,
                     model_base.HasProject):
    """Per-host detail of a port pair (ports, MACs, segment, endpoint)."""
    __tablename__ = 'sfc_portpair_details'
    ingress = sa.Column(sa.String(36), nullable=True)
    egress = sa.Column(sa.String(36), nullable=True)
    host_id = sa.Column(sa.String(255), nullable=False)
    in_mac_address = sa.Column(sa.String(32))
    mac_address = sa.Column(sa.String(32), nullable=False)
    network_type = sa.Column(sa.String(8))
    segment_id = sa.Column(sa.Integer)
    local_endpoint = sa.Column(sa.String(64), nullable=False)
    path_nodes = orm.relationship(PathPortAssoc,
                                  backref='port_pair_detail',
                                  lazy="joined", cascade='all,delete')
    correlation = sa.Column(sa.String(255), nullable=True)


class PathNode(model_base.BASEV2, model_base.HasId, model_base.HasProject):
    """A hop of a rendered service path (nsp/nsi addressed)."""
    __tablename__ = 'sfc_path_nodes'
    nsp = sa.Column(sa.Integer, nullable=False)
    nsi = sa.Column(sa.Integer, nullable=False)
    node_type = sa.Column(sa.String(32))
    portchain_id = sa.Column(
        sa.String(255),
        sa.ForeignKey('sfc_port_chains.id', ondelete='CASCADE'))
    status = sa.Column(sa.String(32))
    portpair_details = orm.relationship(PathPortAssoc,
                                        backref='path_nodes',
                                        lazy="joined", cascade='all,delete')
    next_group_id = sa.Column(sa.Integer)
    next_hop = sa.Column(sa.String(512))
    fwd_path = sa.Column(sa.Boolean(), nullable=False)
    ppg_n_tuple_mapping = sa.Column(sa.String(1024), nullable=True)
    tap_enabled = sa.Column(sa.Boolean(), nullable=False,
                            server_default=sa.sql.false())
    previous_node_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('sfc_path_nodes.id', ondelete='SET NULL'))


class OVSSfcDriverDB(object):
    """DB access mixin for the OVS SFC driver (path nodes, port details)."""

    def initialize(self):
        self.admin_context = n_context.get_admin_context()

    def _make_pathnode_dict(self, node, fields=None):
        """Convert a PathNode row into a plain dict (optionally filtered)."""
        res = {'id': node['id'],
               'project_id': node['project_id'],
               'node_type': node['node_type'],
               'nsp': node['nsp'],
               'nsi': node['nsi'],
               'next_group_id': node['next_group_id'],
               'next_hop': node['next_hop'],
               'portchain_id': node['portchain_id'],
               'status': node['status'],
               'portpair_details': [pair_detail['portpair_id']
                                    for pair_detail in
                                    node['portpair_details']
                                    ],
               'fwd_path': node['fwd_path'],
               'ppg_n_tuple_mapping': node['ppg_n_tuple_mapping'],
               'tap_enabled': node['tap_enabled'],
               'previous_node_id': node['previous_node_id']
               }
        return db_utils.resource_fields(res, fields)

    def _make_port_detail_dict(self, port, fields=None):
        """Convert a PortPairDetail row into a plain dict."""
        res = {'id': port['id'],
               'project_id': port['project_id'],
               'host_id': port['host_id'],
               'ingress': port.get('ingress', None),
               'egress': port.get('egress', None),
               'segment_id': port['segment_id'],
               'local_endpoint': port['local_endpoint'],
               'mac_address': port['mac_address'],
               'in_mac_address': port['in_mac_address'],
               'network_type': port['network_type'],
               'path_nodes': [{'pathnode_id': node['pathnode_id'],
                               'weight': node['weight']}
                              for node in port['path_nodes']],
               'correlation': port['correlation']
               }
        return db_utils.resource_fields(res, fields)

    def _make_pathport_assoc_dict(self, assoc, fields=None):
        """Convert a PathPortAssoc row into a plain dict."""
        res = {'pathnode_id': assoc['pathnode_id'],
               'portpair_id': assoc['portpair_id'],
               'weight': assoc['weight'],
               }
        return db_utils.resource_fields(res, fields)

    def _get_path_node(self, id):
        """Fetch a PathNode row or raise NodeNotFound."""
        try:
            node = model_query.get_by_id(self.admin_context, PathNode, id)
        except exc.NoResultFound:
            raise NodeNotFound(node_id=id)
        return node

    def _get_port_pair_detail(self, id):
        """Fetch a PortPairDetail row or raise PortPairDetailNotFound."""
        try:
            port = model_query.get_by_id(
                self.admin_context, PortPairDetail, id)
        except exc.NoResultFound:
            raise PortPairDetailNotFound(port_id=id)
        return port

    def create_port_pair_detail(self, port):
        with
db_api.CONTEXT_WRITER.using(self.admin_context): args = db_utils.filter_non_model_columns(port, PortPairDetail) args['id'] = uuidutils.generate_uuid() port_obj = PortPairDetail(**args) self.admin_context.session.add(port_obj) return self._make_port_detail_dict(port_obj) def create_path_node(self, node): with db_api.CONTEXT_WRITER.using(self.admin_context): args = db_utils.filter_non_model_columns(node, PathNode) args['id'] = uuidutils.generate_uuid() node_obj = PathNode(**args) self.admin_context.session.add(node_obj) return self._make_pathnode_dict(node_obj) def create_pathport_assoc(self, assoc): with db_api.CONTEXT_WRITER.using(self.admin_context): args = db_utils.filter_non_model_columns(assoc, PathPortAssoc) assoc_obj = PathPortAssoc(**args) self.admin_context.session.add(assoc_obj) return self._make_pathport_assoc_dict(assoc_obj) def delete_pathport_assoc(self, pathnode_id, portdetail_id): with db_api.CONTEXT_WRITER.using(self.admin_context): self.admin_context.session.query(PathPortAssoc).filter_by( pathnode_id=pathnode_id, portpair_id=portdetail_id).delete() def update_port_detail(self, id, port): with db_api.CONTEXT_WRITER.using(self.admin_context): port_obj = self._get_port_detail(id) for key, value in port.items(): if key == 'path_nodes': pns = [] for pn in value: pn_id = pn['pathnode_id'] self._get_path_node(pn_id) query = model_query.query_with_hooks( self.admin_context, PathPortAssoc) pn_association = query.filter_by( pathnode_id=pn_id, portpair_id=id ).first() if not pn_association: pn_association = PathPortAssoc( pathnode_id=pn_id, portpair_id=id, weight=pn.get('weight', 1) ) pns.append(pn_association) port_obj[key] = pns else: port_obj[key] = value port_obj.update(port) return self._make_port_detail_dict(port_obj) def update_path_node(self, id, node): with db_api.CONTEXT_WRITER.using(self.admin_context): node_obj = self._get_path_node(id) for key, value in node.items(): if key == 'portpair_details': pds = [] for pd_id in value: query = 
model_query.query_with_hooks( self.admin_context, PathPortAssoc) pd_association = query.filter_by( pathnode_id=id, portpair_id=pd_id ).first() if not pd_association: pd_association = PathPortAssoc( pathnode_id=id, portpair_id=pd_id ) pds.append(pd_association) node_obj[key] = pds else: node_obj[key] = value return self._make_pathnode_dict(node_obj) def delete_port_pair_detail(self, id): with db_api.CONTEXT_WRITER.using(self.admin_context): port_obj = self._get_port_pair_detail(id) self.admin_context.session.delete(port_obj) def delete_path_node(self, id): with db_api.CONTEXT_WRITER.using(self.admin_context): node_obj = self._get_path_node(id) self.admin_context.session.delete(node_obj) def get_port_detail(self, id): with db_api.CONTEXT_READER.using(self.admin_context): port_obj = self._get_port_pair_detail(id) return self._make_port_detail_dict(port_obj) def get_port_detail_without_exception(self, id): with db_api.CONTEXT_READER.using(self.admin_context): try: port = model_query.get_by_id( self.admin_context, PortPairDetail, id) except exc.NoResultFound: return None return self._make_port_detail_dict(port) def get_path_node(self, id): with db_api.CONTEXT_READER.using(self.admin_context): node_obj = self._get_path_node(id) return self._make_pathnode_dict(node_obj) def get_path_nodes_by_filter(self, filters=None): with db_api.CONTEXT_READER.using(self.admin_context): qry = self._get_path_nodes_by_filter(filters) all_items = qry.all() if all_items: return [self._make_pathnode_dict(item) for item in all_items] return None def get_path_node_by_filter(self, filters=None): with db_api.CONTEXT_READER.using(self.admin_context): qry = self._get_path_nodes_by_filter(filters) first = qry.first() if first: return self._make_pathnode_dict(first) return None def _get_path_nodes_by_filter(self, filters=None): qry = self.admin_context.session.query(PathNode) if filters: for key, value in filters.items(): column = getattr(PathNode, key, None) if column: if not value: qry = 
qry.filter(sql.false()) else: qry = qry.filter(column == value) return qry def get_port_details_by_filter(self, filters=None): with db_api.CONTEXT_READER.using(self.admin_context): qry = self._get_port_details_by_filter(filters) all_items = qry.all() if all_items: return [self._make_port_detail_dict(item) for item in all_items] return None def get_port_detail_by_filter(self, filters=None): with db_api.CONTEXT_READER.using(self.admin_context): qry = self._get_port_details_by_filter(filters) first = qry.first() if first: return self._make_port_detail_dict(first) return None def _get_port_details_by_filter(self, filters=None): qry = self.admin_context.session.query(PortPairDetail) if filters: for key, value in filters.items(): column = getattr(PortPairDetail, key, None) if column: if not value: qry = qry.filter(sql.false()) else: qry = qry.filter(column == value) return qry networking-sfc-10.0.0/networking_sfc/services/sfc/drivers/ovs/constants.py0000664000175000017500000000304313656750333027060 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import constants as n_const

# Path node / flow rule status values.
STATUS_BUILDING = 'building'
STATUS_ACTIVE = 'active'
STATUS_ERROR = 'error'

# Path node types: head (classifier), tail, and service-function hops.
SRC_NODE = 'src_node'
DST_NODE = 'dst_node'
SF_NODE = 'sf_node'

# How a service function is inserted into the data path.
INSERTION_TYPE_L2 = 'l2'
INSERTION_TYPE_L3 = 'l3'
INSERTION_TYPE_BITW = 'bitw'
INSERTION_TYPE_TAP = 'tap'

MAX_HASH = 16

# Map Neutron port device_owner values to an insertion type.
INSERTION_TYPE_DICT = {
    n_const.DEVICE_OWNER_ROUTER_HA_INTF: INSERTION_TYPE_L3,
    n_const.DEVICE_OWNER_ROUTER_INTF: INSERTION_TYPE_L3,
    n_const.DEVICE_OWNER_ROUTER_GW: INSERTION_TYPE_L3,
    n_const.DEVICE_OWNER_FLOATINGIP: INSERTION_TYPE_L3,
    n_const.DEVICE_OWNER_DHCP: INSERTION_TYPE_TAP,
    n_const.DEVICE_OWNER_DVR_INTERFACE: INSERTION_TYPE_L3,
    n_const.DEVICE_OWNER_AGENT_GW: INSERTION_TYPE_L3,
    n_const.DEVICE_OWNER_ROUTER_SNAT: INSERTION_TYPE_TAP,
    n_const.DEVICE_OWNER_LOADBALANCER: INSERTION_TYPE_TAP,
    'compute': INSERTION_TYPE_L2
}

# Ethertypes used when matching/encapsulating chain traffic.
ETH_TYPE_IP = 0x0800
ETH_TYPE_MPLS = 0x8847
ETH_TYPE_NSH = 0x894f
networking-sfc-10.0.0/networking_sfc/services/sfc/drivers/ovs/rpc_topics.py0000664000175000017500000000136513656750333027216 0ustar zuulzuul00000000000000
# Copyright 2015 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Message-bus topic names shared by the SFC plugin and its agents.
AGENT = 'q-agent-notifier'
SFC_PLUGIN = 'q-sfc-plugin'
SFC_AGENT = 'q-sfc-agent'
SFC_FLOW = 'q-sfc-flow'

PORTFLOW = 'portflowrule'
networking-sfc-10.0.0/networking_sfc/services/sfc/drivers/base.py0000664000175000017500000000657713656750333025162 0ustar zuulzuul00000000000000
# Copyright 2015 Futurewei. All rights reserved.
# Copyright 2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class SfcDriverBaseLegacy(object):
    """SFC Driver Base Class for legacy interface."""

    # Legacy (non pre/postcommit) hooks that concrete drivers must provide;
    # the postcommit wrappers in SfcDriverBase delegate to these.

    @abc.abstractmethod
    def create_port_chain(self, context):
        pass

    @abc.abstractmethod
    def update_port_chain(self, context):
        pass

    @abc.abstractmethod
    def create_port_pair(self, context):
        pass

    @abc.abstractmethod
    def update_port_pair(self, context):
        pass

    @abc.abstractmethod
    def create_port_pair_group(self, context):
        pass

    @abc.abstractmethod
    def update_port_pair_group(self, context):
        pass


@six.add_metaclass(abc.ABCMeta)
class SfcDriverBase(SfcDriverBaseLegacy):
    """SFC Driver Base Class.

    Pre/postcommit interface: precommit hooks default to no-ops; the
    create/update postcommit hooks delegate to the legacy methods above.
    """

    def create_port_chain_precommit(self, context):
        pass

    def create_port_chain_postcommit(self, context):
        self.create_port_chain(context)

    @abc.abstractmethod
    def delete_port_chain(self, context):
        pass

    def delete_port_chain_precommit(self, context):
        pass

    def delete_port_chain_postcommit(self, context):
        pass

    def update_port_chain_precommit(self, context):
        pass

    def update_port_chain_postcommit(self, context):
        self.update_port_chain(context)

    def create_port_pair_precommit(self, context):
        pass

    def create_port_pair_postcommit(self, context):
        self.create_port_pair(context)

    @abc.abstractmethod
    def delete_port_pair(self, context):
        pass

    def delete_port_pair_precommit(self, context):
        pass

    def delete_port_pair_postcommit(self, context):
        pass

    def update_port_pair_precommit(self, context):
        pass

    def update_port_pair_postcommit(self, context):
        self.update_port_pair(context)

    def create_port_pair_group_precommit(self, context):
        pass

    def create_port_pair_group_postcommit(self, context):
        self.create_port_pair_group(context)

    @abc.abstractmethod
    def delete_port_pair_group(self, context):
        pass

    def delete_port_pair_group_precommit(self, context):
        pass

    def delete_port_pair_group_postcommit(self, context):
        pass

    def update_port_pair_group_precommit(self, context):
        pass

    def update_port_pair_group_postcommit(self, context):
        self.update_port_pair_group(context)

    # Service-graph hooks are optional: default to no-ops so drivers that
    # do not support graphs need not override them.

    def create_service_graph_precommit(self, context):
        pass

    def create_service_graph_postcommit(self, context):
        pass

    def update_service_graph_precommit(self, context):
        pass

    def update_service_graph_postcommit(self, context):
        pass

    def delete_service_graph_precommit(self, context):
        pass

    def delete_service_graph_postcommit(self, context):
        pass
networking-sfc-10.0.0/networking_sfc/services/sfc/common/0000775000175000017500000000000013656750461023477 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/services/sfc/common/__init__.py0000664000175000017500000000000013656750333025574 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/services/sfc/common/config.py0000664000175000017500000000176713656750333025323 0ustar zuulzuul00000000000000
# Copyright 2015 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg

from networking_sfc._i18n import _

# Options registered under the [sfc] group: the ordered driver list the
# SFC service plugin loads at startup.
SFC_DRIVER_OPTS = [
    cfg.ListOpt('drivers',
                default=['dummy'],
                help=_("An ordered list of service chain drivers "
                       "entrypoints to be loaded from the "
                       "networking_sfc.sfc.drivers namespace.")),
]


cfg.CONF.register_opts(SFC_DRIVER_OPTS, "sfc")
networking-sfc-10.0.0/networking_sfc/services/sfc/common/ovs_ext_lib.py0000664000175000017500000001027113656750333026365 0ustar zuulzuul00000000000000
# Copyright 2015 Huawei.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent.common import ovs_lib
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
    as ovs_consts
from neutron_lib import exceptions
from oslo_log import log as logging

from networking_sfc._i18n import _

LOG = logging.getLogger(__name__)


def get_port_mask(min_port, max_port):
    """Decompose an L4 port range into a list of 'port/mask' strings.

    Each entry is an OpenFlow-style '0x%x/0x%x' pair; together they cover
    exactly the inclusive range [min_port, max_port].
    """
    if min_port < 1 or max_port > 0xffff or min_port > max_port:
        raise exceptions.InvalidInput(
            error_message=_("the port range is invalid"))
    masks = []
    cursor = min_port
    while cursor <= max_port:
        mask = 0xffff
        # Greedily widen the mask while the aligned block it covers
        # still starts at `cursor` and fits inside the range.
        while mask != 0:
            wider = (mask << 1) & 0xffff
            block_start = cursor & wider
            block_end = cursor + (wider ^ 0xffff)
            if block_start != cursor or block_end > max_port:
                break
            mask = wider
        masks.append('0x%x/0x%x' % (cursor, mask))
        # Jump past the block just emitted.
        cursor = cursor + (mask ^ 0xffff) + 1
    return masks


class SfcOVSBridgeExt(object):
    """Wrapper adding OpenFlow group-table helpers to an OVS bridge."""

    def __init__(self, ovs_bridge):
        self.bridge = ovs_bridge
        # OpenFlow 1.3 is needed to manipulate groups.
        # To support the NSH feature, OpenFlow 1.3 is also needed.
        self.bridge.use_at_least_protocol(ovs_consts.OPENFLOW13)

    def __getattr__(self, name):
        # Proxy every attribute we don't define to the wrapped bridge.
        return getattr(self.bridge, name)

    def do_action_groups(self, action, kwargs_list):
        """Run an ovs-ofctl group command ('add', 'mod' or 'del')."""
        group_strs = [_build_group_expr_str(kw, action) for kw in kwargs_list]
        if action in ('add', 'del'):
            cmd = '%s-groups' % action
        elif action == 'mod':
            cmd = '%s-group' % action
        else:
            raise exceptions.InvalidInput(
                error_message=_("Action is illegal"))
        # '-' makes ovs-ofctl read the group specs from stdin.
        self.run_ofctl(cmd, ['-'], '\n'.join(group_strs))

    def add_group(self, **kwargs):
        self.do_action_groups('add', [kwargs])

    def mod_group(self, **kwargs):
        self.do_action_groups('mod', [kwargs])

    def delete_group(self, **kwargs):
        self.do_action_groups('del', [kwargs])

    def dump_group_for_id(self, group_id):
        """Return the dump-groups output lines for one group, or None."""
        output = self.run_ofctl("dump-groups", ["%d" % group_id])
        if not output:
            return None
        return '\n'.join(line for line in output.splitlines()
                         if ovs_lib.is_a_flow_line(line))

    def get_bridge_ports(self):
        """Return the OpenFlow port numbers of all ports on the bridge."""
        return [self.bridge.get_port_ofport(name)
                for name in self.bridge.get_port_name_list()]


def _build_group_expr_str(group_dict, cmd):
    """Render one group spec dict into an ovs-ofctl group expression."""
    parts = []
    buckets = None
    group_id = None
    # Deletion may address 'all' groups, so id/buckets are only mandatory
    # for add/mod.
    if cmd != 'del':
        if "group_id" not in group_dict:
            raise exceptions.InvalidInput(
                error_message=_("Must specify one group Id on group addition"
                                " or modification"))
        group_id = "group_id=%s" % group_dict.pop('group_id')

        if "buckets" not in group_dict:
            raise exceptions.InvalidInput(
                error_message=_("Must specify one or more buckets on group"
                                " addition or modification"))
        buckets = "%s" % group_dict.pop('buckets')

    if group_id:
        parts.append(group_id)
    for key, value in group_dict.items():
        parts.append("%s=%s" % (key, value))
    if buckets:
        parts.append(buckets)
    return ','.join(parts)
"""Exceptions used by SFC plugin and drivers.""" from neutron_lib import exceptions from networking_sfc._i18n import _ class SfcDriverError(exceptions.NeutronException): """SFC driver call failed.""" message = _("%(method)s failed.") class SfcException(exceptions.NeutronException): """Base for SFC driver exceptions returned to user.""" pass class SfcBadRequest(exceptions.BadRequest, SfcException): """Base for SFC driver bad request exceptions returned to user.""" message = _("%(message)s") class SfcNoSubnetGateway(SfcDriverError): """No subnet gateway.""" message = _("There is no %(type)s of ip prefix %(cidr)s.") class SfcNoSuchSubnet(SfcDriverError): """No such subnet.""" message = _("There is no %(type)s of %(cidr)s.") class FlowClassifierInvalid(SfcDriverError): """Invalid flow classifier.""" message = _("There is no %(type)s assigned.") networking-sfc-10.0.0/networking_sfc/services/sfc/common/context.py0000664000175000017500000000614513656750333025541 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class SfcPluginContext(object):
    """Base class for the context objects handed to SFC drivers.

    Bundles the SFC plugin instance and the request (plugin) context so
    driver hooks can reach both.
    """

    def __init__(self, plugin, plugin_context):
        self._plugin = plugin
        self._plugin_context = plugin_context


class PortChainContext(SfcPluginContext):
    """Context for port chain driver calls."""

    def __init__(self, plugin, plugin_context, portchain,
                 original_portchain=None):
        super().__init__(plugin, plugin_context)
        self._portchain = portchain
        self._original_portchain = original_portchain

    @property
    def current(self):
        """The port chain being acted upon."""
        return self._portchain

    @property
    def original(self):
        """The pre-update port chain (None except on updates)."""
        return self._original_portchain


class FlowClassifierContext(SfcPluginContext):
    """Context for flow classifier driver calls."""

    def __init__(self, plugin, plugin_context, flowclassifier,
                 original_flowclassifier=None):
        super().__init__(plugin, plugin_context)
        self._flowclassifier = flowclassifier
        self._original_flowclassifier = original_flowclassifier

    @property
    def current(self):
        """The flow classifier being acted upon."""
        return self._flowclassifier

    @property
    def original(self):
        """The pre-update flow classifier (None except on updates)."""
        return self._original_flowclassifier


class PortPairContext(SfcPluginContext):
    """Context for port pair driver calls."""

    def __init__(self, plugin, plugin_context, portpair,
                 original_portpair=None):
        super().__init__(plugin, plugin_context)
        self._portpair = portpair
        self._original_portpair = original_portpair

    @property
    def current(self):
        """The port pair being acted upon."""
        return self._portpair

    @property
    def original(self):
        """The pre-update port pair (None except on updates)."""
        return self._original_portpair


class PortPairGroupContext(SfcPluginContext):
    """Context for port pair group driver calls."""

    def __init__(self, plugin, plugin_context, portpairgroup,
                 original_portpairgroup=None):
        super().__init__(plugin, plugin_context)
        self._portpairgroup = portpairgroup
        self._original_portpairgroup = original_portpairgroup

    @property
    def current(self):
        """The port pair group being acted upon."""
        return self._portpairgroup

    @property
    def original(self):
        """The pre-update port pair group (None except on updates)."""
        return self._original_portpairgroup


class ServiceGraphContext(SfcPluginContext):
    """Context for service graph driver calls."""

    def __init__(self, plugin, plugin_context, service_graph,
                 original_graph=None):
        super().__init__(plugin, plugin_context)
        self._service_graph = service_graph
        self._original_graph = original_graph

    @property
    def current(self):
        """The service graph being acted upon."""
        return self._service_graph

    @property
    def original(self):
        """The pre-update service graph (None except on updates)."""
        return self._original_graph
        :param extensions: Pre-configured Extension instances
        :type extensions: list of :class:`~stevedore.extension.Extension`
        :param namespace: The namespace for the manager; used only for
            identification since the extensions are passed in.
        :type namespace: str
        :return: The manager instance, initialized for testing
        """
        o = super(SfcDriverManager, cls).make_test_instance(
            extensions, namespace=namespace)
        # Mirror what __init__ does before registering the drivers.
        o.drivers = {}
        o.ordered_drivers = []
        o._register_drivers()
        return o

    def _register_drivers(self):
        """Register all SFC drivers.

        This method should only be called once in the SfcDriverManager
        constructor.
        """
        for ext in self:
            self.drivers[ext.name] = ext
            self.ordered_drivers.append(ext)
        LOG.info("Registered SFC drivers: %s",
                 [driver.name for driver in self.ordered_drivers])

    def initialize(self):
        """Initialize every loaded driver, in configured order."""
        # ServiceChain bulk operations requires each driver to support them
        self.native_bulk_support = True
        for driver in self.ordered_drivers:
            LOG.info("Initializing SFC driver '%s'", driver.name)
            driver.obj.initialize()
            # Bulk support holds only if every driver supports it.
            self.native_bulk_support &= getattr(driver.obj,
                                                'native_bulk_support', True)

    def _call_drivers(self, method_name, context, raise_orig_exc=False):
        """Helper method for calling a method across all SFC drivers.

        :param method_name: name of the method to call
        :param context: context parameter to pass to each method call
        :param raise_orig_exc: whether or not to raise the original
        driver exception, or use a general one
        """
        for driver in self.ordered_drivers:
            try:
                getattr(driver.obj, method_name)(context)
            except Exception as e:
                # This is an internal failure.
                LOG.exception(e)
                LOG.error(
                    "SFC driver '%(name)s' failed in %(method)s",
                    {'name': driver.name, 'method': method_name}
                )
                # NOTE(review): the first failing driver aborts the loop;
                # later drivers are not called.
                if raise_orig_exc:
                    raise
                else:
                    raise sfc_exc.SfcDriverError(
                        method=method_name
                    )

    # The remaining methods fan each plugin operation out to every driver
    # via _call_drivers(); only create_port_chain_precommit re-raises the
    # original driver exception.
    def create_port_chain_precommit(self, context):
        self._call_drivers("create_port_chain_precommit", context,
                           raise_orig_exc=True)

    def create_port_chain_postcommit(self, context):
        self._call_drivers("create_port_chain_postcommit", context)

    def update_port_chain_precommit(self, context):
        self._call_drivers("update_port_chain_precommit", context)

    def update_port_chain_postcommit(self, context):
        self._call_drivers("update_port_chain_postcommit", context)

    def delete_port_chain(self, context):
        self._call_drivers("delete_port_chain", context)

    def delete_port_chain_precommit(self, context):
        self._call_drivers("delete_port_chain_precommit", context)

    def delete_port_chain_postcommit(self, context):
        self._call_drivers("delete_port_chain_postcommit", context)

    def create_port_pair_precommit(self, context):
        self._call_drivers("create_port_pair_precommit", context)

    def create_port_pair_postcommit(self, context):
        self._call_drivers("create_port_pair_postcommit", context)

    def update_port_pair_precommit(self, context):
        self._call_drivers("update_port_pair_precommit", context)

    def update_port_pair_postcommit(self, context):
        self._call_drivers("update_port_pair_postcommit", context)

    def delete_port_pair(self, context):
        self._call_drivers("delete_port_pair", context)

    def delete_port_pair_precommit(self, context):
        self._call_drivers("delete_port_pair_precommit", context)

    def delete_port_pair_postcommit(self, context):
        self._call_drivers("delete_port_pair_postcommit", context)

    def create_port_pair_group_precommit(self, context):
        self._call_drivers("create_port_pair_group_precommit", context)

    def create_port_pair_group_postcommit(self, context):
        self._call_drivers("create_port_pair_group_postcommit", context)

    def update_port_pair_group_precommit(self, context):
        self._call_drivers("update_port_pair_group_precommit", context)

    def update_port_pair_group_postcommit(self, context):
        self._call_drivers("update_port_pair_group_postcommit", context)

    def delete_port_pair_group(self, context):
        self._call_drivers("delete_port_pair_group", context)

    def delete_port_pair_group_precommit(self, context):
        self._call_drivers("delete_port_pair_group_precommit", context)

    def delete_port_pair_group_postcommit(self, context):
        self._call_drivers("delete_port_pair_group_postcommit", context)

    def create_service_graph_precommit(self, context):
        self._call_drivers("create_service_graph_precommit", context)

    def create_service_graph_postcommit(self, context):
        self._call_drivers("create_service_graph_postcommit", context)

    def update_service_graph_precommit(self, context):
        self._call_drivers("update_service_graph_precommit", context)

    def update_service_graph_postcommit(self, context):
        self._call_drivers("update_service_graph_postcommit", context)

    def delete_service_graph_precommit(self, context):
        self._call_drivers("delete_service_graph_precommit", context)

    def delete_service_graph_postcommit(self, context):
        self._call_drivers("delete_service_graph_postcommit", context)


# --- networking_sfc/services/sfc/agent/__init__.py ---
# Monkey-patch the standard library for eventlet before any agent code runs.
from neutron.common import eventlet_utils
eventlet_utils.monkey_patch()
# Copyright 2015 Huawei.
# Copyright 2016 Red Hat, Inc.
# Copyright 2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from neutron_lib import constants as n_consts
from oslo_config import cfg
from oslo_log import log as logging

from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
    as ovs_consts
from neutron.plugins.ml2.drivers.openvswitch.agent import vlanmanager

from networking_sfc.services.sfc.agent.extensions import sfc
from networking_sfc.services.sfc.common import ovs_ext_lib
from networking_sfc.services.sfc.drivers.ovs import constants

LOG = logging.getLogger(__name__)

cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
                      'common.config')

# This table is used to process the traffic across different subnet scenario.
# Flow 1: pri=1, ip,dl_dst=nexthop_mac,nw_src=nexthop_subnet. actions=
# push_mpls:0x8847,set_mpls_label,set_mpls_ttl,push_vlan,output:(patch port
# or resubmit to table(INGRESS_TABLE)
# Flow 2: pri=0, ip,dl_dst=nexthop_mac,, action=push_mpls:0x8847,
# set_mpls_label,set_mpls_ttl,push_vlan,output:(patch port or resubmit to
# table(INGRESS_TABLE)
ACROSS_SUBNET_TABLE = 5

# The table has multiple flows that steer traffic for the different chains
# to the ingress port of different service functions hosted on this Compute
# node.
INGRESS_TABLE = 10

# port chain default flow rule priority
PC_DEF_PRI = 20
PC_INGRESS_PRI = 30

# Reverse group number offset for dump_group
REVERSE_GROUP_NUMBER_OFFSET = 7000

# Table classifying traffic destined for a "tap" (passive) service function.
TAP_CLASSIFIER_TABLE = 7

# This table floods TAP packets on tunnel ports
TAP_TUNNEL_OUTPUT_TABLE = 25

# actions
RESUBMIT_TAP_TABLE = ',resubmit(,%s)' % TAP_CLASSIFIER_TABLE
NORMAL_ACTION = ",NORMAL"


class SfcOVSAgentDriver(sfc.SfcAgentDriver):
    """This class will support MPLS frame

    Ethernet + MPLS
    IPv4 Packet:
    +-------------------------------+---------------+--------------------+
    |Outer Ethernet, ET=0x8847      | MPLS head,    | original IP Packet |
    +-------------------------------+---------------+--------------------+
    """

    def __init__(self):
        super(SfcOVSAgentDriver, self).__init__()
        # All of these are populated by initialize(), not here.
        self.agent_api = None
        self.br_int = None          # integration bridge (wrapped)
        self.br_tun = None          # tunnel bridge (wrapped)
        self.local_ip = None        # this node's tunnel endpoint IP
        self.patch_tun_ofport = None
        self.vlan_manager = None

    def consume_api(self, agent_api):
        """Receive the OVS agent extension API object."""
        self.agent_api = agent_api

    def initialize(self):
        """Wrap the agent bridges and (re)install the base SFC flows."""
        self.br_int = ovs_ext_lib.SfcOVSBridgeExt(
            self.agent_api.request_int_br())
        self.local_ip = cfg.CONF.OVS.local_ip
        self.patch_tun_ofport = self.br_int.get_port_ofport(
            cfg.CONF.OVS.int_peer_patch_port)
        self.vlan_manager = vlanmanager.LocalVlanManager()
        # Drop any stale SFC groups/flows left over from a previous run.
        self._clear_sfc_flow_on_int_br()

        self.br_tun = ovs_ext_lib.SfcOVSBridgeExt(
            self.agent_api.request_tun_br())
        self.patch_int_ofport = self.br_tun.get_port_ofport(
            cfg.CONF.OVS.tun_peer_patch_port)
        self._clear_sfc_flow_on_tun_br()
    def update_flow_rules(self, flowrule, flowrule_status):
        """Install egress/ingress flows for one flow rule.

        Appends a status dict (ACTIVE or ERROR) for the rule to
        flowrule_status.
        """
        # On the reverse path of a chain, an sf_node's ports swap roles.
        if flowrule['fwd_path'] is False and flowrule['node_type'] == \
                'sf_node':
            flowrule['ingress'], flowrule['egress'] = flowrule['egress'], \
                flowrule['ingress']
        try:
            LOG.debug('update_flow_rule, flowrule = %s', flowrule)

            if flowrule.get('egress', None):
                self._setup_egress_flow_rules(flowrule)
            if flowrule.get('ingress', None) and not flowrule.get(
                    'skip_ingress_flow_config', None):
                self._setup_ingress_flow_rules(flowrule)

            flowrule_status_temp = {'id': flowrule['id'],
                                    'status': constants.STATUS_ACTIVE}
            flowrule_status.append(flowrule_status_temp)
        except Exception as e:
            flowrule_status_temp = {'id': flowrule['id'],
                                    'status': constants.STATUS_ERROR}
            flowrule_status.append(flowrule_status_temp)
            LOG.exception(e)
            LOG.error("update_flow_rules failed")

    def delete_flow_rule(self, flowrule, flowrule_status):
        """Remove the flows/groups previously installed for a flow rule."""
        # Same port-role swap as in update_flow_rules for the reverse path.
        if flowrule['fwd_path'] is False and flowrule['node_type'] == \
                'sf_node':
            flowrule['ingress'], flowrule['egress'] = flowrule['egress'], \
                flowrule['ingress']
        try:
            LOG.debug("delete_flow_rule, flowrule = %s", flowrule)
            pc_corr = flowrule['pc_corr']

            # delete tunnel table flow rule on br-int(egress match)
            if flowrule['egress'] is not None:
                self._setup_local_switch_flows_on_int_br(
                    flowrule,
                    flowrule['del_fcs'],
                    None,
                    add_flow=False,
                    match_inport=True)
                # delete group table, need to check again
                group_id = flowrule.get('next_group_id', None)
                # Only drop the group when this is its last reference.
                if group_id and flowrule.get('group_refcnt', None) <= 1:
                    if flowrule['fwd_path']:
                        self.br_int.delete_group(group_id=group_id)
                    else:
                        self.br_int.delete_group(
                            group_id=group_id + REVERSE_GROUP_NUMBER_OFFSET)
                self._delete_across_subnet_table_flows(flowrule)

            if flowrule['ingress'] is not None:
                # delete table INGRESS_TABLE ingress match flow rule
                # on br-int(ingress match)
                vif_port = self.br_int.get_vif_port_by_id(flowrule['ingress'])
                if vif_port:
                    # third, install br-int flow rule on table INGRESS_TABLE
                    # for ingress traffic
                    if pc_corr == 'mpls':
                        self._delete_flows_mpls(flowrule, vif_port)
                    elif pc_corr == 'nsh':
                        self._delete_flows_nsh(flowrule, vif_port)
        except Exception as e:
            flowrule_status_temp = {'id': flowrule['id'],
                                    'status': constants.STATUS_ERROR}
            flowrule_status.append(flowrule_status_temp)
            LOG.exception(e)
            LOG.error("delete_flow_rule failed")

    def _clear_sfc_flow_on_int_br(self):
        """Reset all SFC state on br-int and reinstall the base flows."""
        self.br_int.delete_group(group_id='all')
        self.br_int.delete_flows(table=ACROSS_SUBNET_TABLE)
        self.br_int.delete_flows(table=INGRESS_TABLE)
        self.br_int.delete_flows(table=TAP_CLASSIFIER_TABLE)
        # Steer encapsulated (MPLS/NSH) traffic into INGRESS_TABLE; drop
        # anything that reaches INGRESS_TABLE without a specific match.
        self.br_int.install_goto(dest_table_id=INGRESS_TABLE,
                                 priority=PC_DEF_PRI,
                                 eth_type=constants.ETH_TYPE_MPLS)
        self.br_int.install_goto(dest_table_id=INGRESS_TABLE,
                                 priority=PC_DEF_PRI,
                                 eth_type=constants.ETH_TYPE_NSH)
        self.br_int.install_drop(table_id=INGRESS_TABLE)

    def _parse_flow_classifier(self, flow_classifier):
        """Translate a flow classifier dict into OVS match components.

        Returns (eth_type, nw_proto, source_port_masks,
        destination_port_masks); port masks are '0x%x/0x%x' strings.
        """
        eth_type, nw_proto, source_port_masks, destination_port_masks = (
            (None, ) * 4)

        if (not flow_classifier['source_port_range_min'] and
                not flow_classifier['source_port_range_max']):
            # wildcard
            source_port_masks = ['0/0x0']
        elif not flow_classifier['source_port_range_min']:
            source_port_masks = ovs_ext_lib.get_port_mask(
                1, flow_classifier['source_port_range_max'])
        elif not flow_classifier['source_port_range_max']:
            source_port_masks = ovs_ext_lib.get_port_mask(
                flow_classifier['source_port_range_min'], 65535)
        else:
            source_port_masks = ovs_ext_lib.get_port_mask(
                flow_classifier['source_port_range_min'],
                flow_classifier['source_port_range_max'])

        if (not flow_classifier['destination_port_range_min'] and
                not flow_classifier['destination_port_range_max']):
            # wildcard
            destination_port_masks = ['0/0x0']
        elif not flow_classifier['destination_port_range_min']:
            destination_port_masks = ovs_ext_lib.get_port_mask(
                1, flow_classifier['destination_port_range_max'])
        elif not flow_classifier['destination_port_range_max']:
            destination_port_masks = ovs_ext_lib.get_port_mask(
                flow_classifier['destination_port_range_min'], 65535)
        else:
            destination_port_masks = ovs_ext_lib.get_port_mask(
                flow_classifier['destination_port_range_min'],
                flow_classifier['destination_port_range_max'])

        if flow_classifier['ethertype'] == "IPv4":
            eth_type = constants.ETH_TYPE_IP
            if n_consts.PROTO_NAME_TCP == flow_classifier['protocol']:
                nw_proto = n_consts.PROTO_NUM_TCP
            elif n_consts.PROTO_NAME_UDP == flow_classifier['protocol']:
                nw_proto = n_consts.PROTO_NUM_UDP
            elif n_consts.PROTO_NAME_ICMP == flow_classifier['protocol']:
                nw_proto = n_consts.PROTO_NUM_ICMP
            else:
                nw_proto = None
        elif flow_classifier['ethertype'] == "IPv6":
            LOG.error("Current portchain agent doesn't support IPv6")
        else:
            LOG.error("invalid protocol input")
        return (eth_type, nw_proto,
                source_port_masks, destination_port_masks)
    def _get_flow_infos_from_flow_classifier(self, flow_classifier, flowrule):
        """Expand one flow classifier into a list of OVS match dicts.

        One dict is produced per (source mask x destination mask) pair;
        on the reverse path src/dst fields are mirrored.
        """
        flow_infos = []
        nw_src, nw_dst, tp_src, tp_dst = ((None, ) * 4)

        if flow_classifier['ethertype'] != "IPv4":
            LOG.error("Current portchain agent only supports IPv4")
            return flow_infos

        # parse and transfer flow info to match field info
        eth_type, nw_proto, source_port_masks, destination_port_masks = (
            self._parse_flow_classifier(flow_classifier))

        if flowrule['fwd_path']:
            if flow_classifier['source_ip_prefix']:
                nw_src = flow_classifier['source_ip_prefix']
            else:
                nw_src = '0.0.0.0/0.0.0.0'
            if flow_classifier['destination_ip_prefix']:
                nw_dst = flow_classifier['destination_ip_prefix']
            else:
                nw_dst = '0.0.0.0/0.0.0.0'
        else:
            # Reverse path: classifier src/dst are swapped in the match.
            if flow_classifier['source_ip_prefix']:
                nw_src = flow_classifier['destination_ip_prefix']
            else:
                nw_src = '0.0.0.0/0.0.0.0'
            if flow_classifier['destination_ip_prefix']:
                nw_dst = flow_classifier['source_ip_prefix']
            else:
                nw_dst = '0.0.0.0/0.0.0.0'

        if source_port_masks and destination_port_masks:
            for destination_port in destination_port_masks:
                for source_port in source_port_masks:
                    if flowrule['fwd_path']:
                        tp_src = '%s' % source_port
                        tp_dst = '%s' % destination_port
                    else:
                        tp_dst = '%s' % source_port
                        tp_src = '%s' % destination_port
                    flow_info = {'eth_type': eth_type,
                                 'nw_src': nw_src,
                                 'nw_dst': nw_dst,
                                 'tp_src': tp_src,
                                 'tp_dst': tp_dst}
                    if nw_proto:
                        flow_info['nw_proto'] = nw_proto
                    flow_infos.append(flow_info)
        return flow_infos

    def _get_flow_infos_from_flow_classifier_list(self, flow_classifier_list,
                                                  flowrule):
        """Concatenate the match dicts of every classifier in the list."""
        flow_infos = []
        if not flow_classifier_list:
            return flow_infos
        for flow_classifier in flow_classifier_list:
            flow_infos.extend(
                self._get_flow_infos_from_flow_classifier(flow_classifier,
                                                          flowrule))
        return flow_infos

    def _match_by_header(self, match_info, nsp, nsi):
        """Replace the in_port match by a service-path (reg0) match."""
        match_info['reg0'] = (nsp << 8) | nsi
        # on header-matching there's no in_port
        match_info.pop('in_port', None)

    def _setup_local_switch_flows_on_int_br(self, flowrule,
                                            flow_classifier_list,
                                            actions, add_flow=True,
                                            match_inport=True):
        """Add/remove classification flows in br-int table 0.

        One flow (or, for service graphs, one per branch) is installed
        per match dict derived from the classifier list.
        """
        inport_match = {}
        priority = PC_DEF_PRI
        # no pp_corr means that classification will not be based on encap
        pp_corr = flowrule.get('pp_corr')
        node_type = flowrule['node_type']
        branch_info = flowrule.get('branch_info')
        on_add = None
        flow_count = 1
        if branch_info and node_type == constants.SRC_NODE:
            # for branching, we need as many flows (per flow info) as branches
            # because we can't AND-match the same field in a single flow
            flow_count = len(branch_info.get('matches'))
            on_add = branch_info.get('on_add')

        if match_inport is True:
            egress_port = self.br_int.get_vif_port_by_id(flowrule['egress'])
            if egress_port:
                inport_match = {'in_port': egress_port.ofport}
                priority = PC_INGRESS_PRI

        for flow_info in self._get_flow_infos_from_flow_classifier_list(
                flow_classifier_list, flowrule):
            match_info = dict(inport_match)
            match_info.update(flow_info)
            if node_type == constants.SF_NODE:
                if pp_corr:
                    # Encap-aware port pair: match on the SFC header
                    # instead of the 5-tuple.
                    match_info = {'in_port': match_info['in_port']}
                    if pp_corr == 'mpls':
                        match_info = self._build_classification_match_sfc_mpls(
                            flowrule, match_info)
                    elif pp_corr == 'nsh':
                        match_info = self._build_classification_match_sfc_nsh(
                            flowrule, match_info)
            for i in range(flow_count):
                if branch_info:
                    # for Service Graphs (branching):
                    nsp = branch_info['matches'][i][0]
                    nsi = branch_info['matches'][i][1]
                if add_flow:
                    if on_add:
                        self._match_by_header(match_info, nsp, nsi)
                    self.br_int.add_flow(table=ovs_consts.LOCAL_SWITCHING,
                                         priority=priority,
                                         actions=actions,
                                         **match_info)
                else:
                    if on_add is False:
                        self._match_by_header(match_info, nsp, nsi)
                    self.br_int.delete_flows(table=ovs_consts.LOCAL_SWITCHING,
                                             priority=priority,
                                             strict=True,
                                             **match_info)
    def _setup_egress_flow_rules(self, flowrule, match_inport=True):
        """Install egress classification, group and across-subnet flows.

        When the rule has a next hop group: program ACROSS_SUBNET_TABLE
        (and the OpenFlow group) per next hop, then table-0 flows that
        send matched traffic to the group.  Otherwise install the
        end-of-chain actions (decap + NORMAL, or branch-point register
        load for service graphs).
        """
        group_id = flowrule.get('next_group_id', None)
        next_hops = flowrule.get('next_hops', None)
        pc_corr = flowrule.get('pc_corr', 'mpls')
        pp_corr = flowrule.get('pp_corr', None)
        node_type = flowrule.get('node_type')
        next_hop_tap_enabled = None

        # if the group is not none, install the egress rule for this SF
        if group_id and next_hops:
            # 1st, install br-int flow rule on table ACROSS_SUBNET_TABLE
            # and group table
            buckets = []
            vlan = self._get_vlan_by_port(flowrule['egress'])
            if isinstance(next(iter(next_hops)), dict):
                next_hop_tap_enabled = next_hops[0].get('tap_enabled')
            for item in next_hops:
                # all next hops share same pp_corr, enforced by higher layers
                pp_corr_nh = item.get('pp_corr', None)
                if flowrule['fwd_path']:
                    bucket = (
                        'bucket=weight=%d, mod_dl_dst:%s, resubmit(,%d)' % (
                            item['weight'],
                            item['in_mac_address'],
                            ACROSS_SUBNET_TABLE))
                else:
                    bucket = (
                        'bucket=weight=%d, mod_dl_dst:%s, resubmit(,%d)' % (
                            item['weight'],
                            item['mac_address'],
                            ACROSS_SUBNET_TABLE))
                buckets.append(bucket)

                subnet_actions_list = []
                across_flow = "mod_vlan_vid:%d," % vlan
                # the classic encapsulation of packets in ACROSS_SUBNET_TABLE
                # is kept unchanged for the same scenarios, i.e. when the next
                # hops don't support encapsulation and neither the current
                # one.
                if not pp_corr and pp_corr_nh is None:
                    if pc_corr == 'mpls':
                        push_encap = self._build_push_mpls(flowrule['nsp'],
                                                           flowrule['nsi'])
                    elif pc_corr == 'nsh':
                        push_encap = self._build_push_nsh(flowrule['nsp'],
                                                          flowrule['nsi'])
                    across_flow = push_encap + across_flow
                subnet_actions_list.append(across_flow)

                if item['local_endpoint'] == self.local_ip:
                    # Next hop is on this node: go straight to INGRESS_TABLE.
                    subnet_actions = 'resubmit(,%d)' % INGRESS_TABLE
                else:
                    # same subnet with next hop
                    subnet_actions = 'output:%s' % self.patch_tun_ofport
                subnet_actions_list.append(subnet_actions)

                eth_type = constants.ETH_TYPE_IP
                if pp_corr == 'mpls' or pp_corr_nh == 'mpls':
                    eth_type = constants.ETH_TYPE_MPLS
                elif pp_corr == 'nsh' or pp_corr_nh == 'nsh':
                    eth_type = constants.ETH_TYPE_NSH

                if item.get('tap_enabled'):
                    self._add_tap_classification_flows(flowrule,
                                                       item,
                                                       subnet_actions_list)
                else:
                    self._configure_across_subnet_flow(flowrule,
                                                       item,
                                                       subnet_actions_list,
                                                       eth_type)
            if not next_hop_tap_enabled:
                self._add_group_table(buckets, flowrule, group_id)

            # 2nd, install br-int flow rule on table 0 for egress traffic
            enc_actions = ""
            # we only encapsulate on table 0 if we know the next hops will
            # support that encapsulation but the current hop doesn't already.
            if not pp_corr and pp_corr_nh:
                if pc_corr == 'mpls':
                    enc_actions = self._build_push_mpls(flowrule['nsp'],
                                                        flowrule['nsi'])
                elif pc_corr == 'nsh':
                    enc_actions = self._build_push_nsh(flowrule['nsp'],
                                                       flowrule['nsi'])
            if flowrule['fwd_path']:
                enc_actions += "group:%d" % group_id
            else:
                # Reverse-path traffic uses the offset group id.
                rev_group_id = group_id + REVERSE_GROUP_NUMBER_OFFSET
                enc_actions += "group:%d" % rev_group_id
            enc_actions = self._update_enc_actions(enc_actions,
                                                   flowrule,
                                                   next_hop_tap_enabled)
            # to uninstall the removed flow classifiers
            self._setup_local_switch_flows_on_int_br(
                flowrule,
                flowrule['del_fcs'],
                None,
                add_flow=False,
                match_inport=match_inport)
            # to install the added flow classifiers
            self._setup_local_switch_flows_on_int_br(
                flowrule,
                flowrule['add_fcs'],
                enc_actions,
                add_flow=True,
                match_inport=match_inport)
        else:
            end_of_chain_actions = 'normal'
            # at the end of the chain, the header must be removed (if used)
            if (node_type != constants.SRC_NODE) and pp_corr:
                branch_point = flowrule.get('branch_point')
                if branch_point:
                    # Store the service path id in reg0 so a joining graph
                    # branch can match on it after resubmit to table 0.
                    nsp = flowrule['nsp']
                    nsi = flowrule['nsi']
                    sfpi = (nsp << 8) | nsi
                    if pc_corr == 'mpls':
                        end_of_chain_actions = (
                            'load:%s->NXM_NX_REG0[],'
                            'pop_mpls:0x%04x,resubmit(,0)' % (
                                hex(sfpi), constants.ETH_TYPE_IP))
                    elif pc_corr == 'nsh':
                        end_of_chain_actions = (
                            "load:%s->NXM_NX_REG0[],"
                            "decap(),decap(),resubmit(,0)" % (
                                hex(sfpi)))
                else:
                    if pc_corr == 'mpls':
                        end_of_chain_actions = ("pop_mpls:0x%04x,%s" % (
                            constants.ETH_TYPE_IP, end_of_chain_actions))
                    elif pc_corr == 'nsh':
                        end_of_chain_actions = ("decap(),decap(),%s" % (
                            end_of_chain_actions))
            if flowrule.get('tap_enabled'):
                end_of_chain_actions += RESUBMIT_TAP_TABLE
            # to uninstall the removed flow classifiers
            if 'del_fcs' in flowrule:
                self._setup_local_switch_flows_on_int_br(
                    flowrule,
                    flowrule['del_fcs'],
                    None,
                    add_flow=False,
                    match_inport=True)
            if 'add_fcs' in flowrule:
                # to install the added flow classifiers
                self._setup_local_switch_flows_on_int_br(
                    flowrule,
                    flowrule['add_fcs'],
                    actions=end_of_chain_actions,
                    add_flow=True,
                    match_inport=True)

    def _get_vlan_by_port(self, port_id):
        """Return the local VLAN tag of the port's network, or None."""
        try:
            net_uuid = self.vlan_manager.get_net_uuid(port_id)
            return self.vlan_manager.get(net_uuid).vlan
        except (vlanmanager.VifIdNotFound, vlanmanager.MappingNotFound):
            return None

    def _setup_ingress_flow_rules(self, flowrule):
        """Install the INGRESS_TABLE flow delivering traffic to the SF.

        If the port pair does not support the chain's SFC encapsulation
        (pp_corr unset), install an "SFC proxy" flow that strips the
        header before output; otherwise forward it intact.
        """
        vif_port = self.br_int.get_vif_port_by_id(flowrule['ingress'])
        if vif_port:
            vlan = self._get_vlan_by_port(flowrule['ingress'])

            # install br-int flow rule on table 0 for ingress traffic
            # install an SFC Proxy if the port pair doesn't support the
            # SFC encapsulation (pc_corr) specified in the chain
            pc_corr = flowrule['pc_corr']
            pp_corr = flowrule['pp_corr']
            if pc_corr == 'mpls':
                if flowrule.get('tap_enabled'):
                    return self._add_tap_ingress_flow(flowrule, vif_port,
                                                      vlan)
                if pp_corr is None:
                    match_field = self._build_proxy_sfc_mpls(flowrule,
                                                             vif_port, vlan)
                elif pp_corr == 'mpls':
                    match_field = self._build_forward_sfc_mpls(flowrule,
                                                               vif_port, vlan)
            elif pc_corr == 'nsh':
                if pp_corr is None:
                    match_field = self._build_proxy_sfc_nsh(flowrule,
                                                            vif_port, vlan)
                elif pp_corr == 'nsh':
                    match_field = self._build_forward_sfc_nsh(flowrule,
                                                              vif_port, vlan)
            self.br_int.add_flow(**match_field)

    def _build_classification_match_sfc_mpls(self, flowrule, match_info):
        """Add the MPLS SFC-header match fields (label = nsp<<8 | nsi)."""
        match_info['eth_type'] = constants.ETH_TYPE_MPLS
        match_info['mpls_label'] = flowrule['nsp'] << 8 | flowrule['nsi']
        return match_info

    def _build_classification_match_sfc_nsh(self, flowrule, match_info):
        """Add the NSH (MD type 1) SFC-header match fields."""
        match_info['eth_type'] = constants.ETH_TYPE_NSH
        match_info['nsh_mdtype'] = 1
        match_info['nsh_spi'] = flowrule['nsp']
        match_info['nsh_si'] = flowrule['nsi']
        return match_info

    def _build_push_mpls(self, nsp, nsi):
        """Actions pushing an MPLS label encoding the service path/index."""
        return (
            "push_mpls:0x%04x,"
            "set_mpls_label:%d,"
            "set_mpls_ttl:%d," % (constants.ETH_TYPE_MPLS,
                                  nsp << 8 | nsi, nsi))

    def _build_push_nsh(self, nsp, nsi):
        """Actions encapsulating the packet in NSH (MD type 1) + Ethernet."""
        return (
            "encap(nsh,prop(class=nsh,type=md_type,val=1)),"
            "set_field:%s->nsh_spi,set_field:%s->nsh_si,"
            "encap(ethernet)," % (hex(nsp), hex(nsi)))

    def _build_ingress_common_match_field(self, vif_port, vlan):
        """Base INGRESS_TABLE match: destination MAC + local VLAN."""
        return dict(
            table=INGRESS_TABLE,
            priority=1,
            dl_dst=vif_port.vif_mac,
            dl_vlan=vlan)

    def _build_ingress_match_field_sfc_mpls(self, flowrule, vif_port, vlan):
        """INGRESS_TABLE match for MPLS; nsi+1 is the pre-decrement index."""
        match_field = self._build_ingress_common_match_field(vif_port, vlan)
        match_field['eth_type'] = constants.ETH_TYPE_MPLS
        match_field['mpls_label'] = flowrule['nsp'] << 8 | flowrule['nsi'] + 1
        return match_field

    def _build_ingress_match_field_sfc_nsh(self, flowrule, vif_port, vlan):
        """INGRESS_TABLE match for NSH; nsi+1 is the pre-decrement index."""
        match_field = self._build_ingress_common_match_field(vif_port, vlan)
        match_field['eth_type'] = constants.ETH_TYPE_NSH
        match_field['nsh_mdtype'] = 1
        match_field['nsh_spi'] = flowrule['nsp']
        match_field['nsh_si'] = flowrule['nsi'] + 1
        return match_field

    def _build_proxy_sfc_mpls(self, flowrule, vif_port, vlan):
        """Flow that strips the MPLS header before handing to the SF."""
        match_field = self._build_ingress_match_field_sfc_mpls(
            flowrule, vif_port, vlan)
        actions = ("strip_vlan, pop_mpls:0x%04x,"
                   "output:%s" % (constants.ETH_TYPE_IP, vif_port.ofport))
        match_field['actions'] = actions
        return match_field

    def _build_proxy_sfc_nsh(self, flowrule, vif_port, vlan):
        """Flow that strips NSH (preserving the inner dst MAC via a
        packet register) before handing the packet to the SF."""
        match_field = self._build_ingress_match_field_sfc_nsh(
            flowrule, vif_port, vlan)
        actions = ("strip_vlan,move:NXM_OF_ETH_DST->OXM_OF_PKT_REG0[0..47],"
                   "decap(),decap(),"
                   "move:OXM_OF_PKT_REG0[0..47]->NXM_OF_ETH_DST,output:%s"
                   "" % vif_port.ofport)
        match_field['actions'] = actions
        return match_field

    def _build_forward_sfc_mpls(self, flowrule, vif_port, vlan):
        """Flow forwarding MPLS-encapsulated traffic to an encap-aware SF."""
        match_field = self._build_ingress_match_field_sfc_mpls(
            flowrule, vif_port, vlan)
        actions = ("strip_vlan, output:%s" % vif_port.ofport)
        match_field['actions'] = actions
        return match_field

    def _build_forward_sfc_nsh(self, flowrule, vif_port, vlan):
        """Flow forwarding NSH-encapsulated traffic to an encap-aware SF."""
        match_field = self._build_ingress_match_field_sfc_nsh(
            flowrule, vif_port, vlan)
        actions = ("strip_vlan, output:%s" % vif_port.ofport)
        match_field['actions'] = actions
        return match_field
table=INGRESS_TABLE, eth_type=constants.ETH_TYPE_MPLS, dl_src=flowrule['mac_address'], mpls_label=flowrule['nsp'] << 8 | flowrule['nsi'] ) else: self.br_int.delete_flows( table=INGRESS_TABLE, eth_type=constants.ETH_TYPE_MPLS, dl_dst=vif_port.vif_mac, mpls_label=flowrule['nsp'] << 8 | flowrule['nsi'] + 1 ) def _add_group_table(self, buckets, flowrule, group_id): group_content = self.br_int.dump_group_for_id(group_id) buckets = ','.join(buckets) if flowrule['fwd_path']: if group_content.find('group_id=%d' % group_id) == -1: self.br_int.add_group(group_id=group_id, type='select', buckets=buckets) else: self.br_int.mod_group(group_id=group_id, type='select', buckets=buckets) else: # set different id for rev_group rev_group_id = group_id + REVERSE_GROUP_NUMBER_OFFSET if group_content.find('group_id=%d' % (rev_group_id)) == -1: self.br_int.add_group(group_id=rev_group_id, type='select', buckets=buckets) else: self.br_int.mod_group(group_id=rev_group_id, type='select', buckets=buckets) def _configure_across_subnet_flow(self, flowrule, item, subnet_actions_list, eth_type): if flowrule['fwd_path']: self.br_int.add_flow( table=ACROSS_SUBNET_TABLE, priority=0, dl_dst=item['in_mac_address'], eth_type=eth_type, actions="%s" % ','.join(subnet_actions_list)) else: self.br_int.add_flow( table=ACROSS_SUBNET_TABLE, priority=0, dl_dst=item['mac_address'], eth_type=eth_type, actions="%s" % ','.join(subnet_actions_list)) def _add_tap_classification_flows(self, flowrule, item, subnet_actions_list): egress_port = self.br_int.get_vif_port_by_id(flowrule['egress']) vlan = self._get_vlan_by_port(flowrule['egress']) if not egress_port: return in_port = egress_port.ofport vif_mac = egress_port.vif_mac tap_action = "" if flowrule['pc_corr'] == 'mpls': tap_action += self._build_push_mpls(item['nsp'], item['nsi']) tap_action += "mod_vlan_vid:%d," % vlan subnet_actions_list[0] = tap_action ovs_rule = dict() self._get_eth_type(flowrule, item, ovs_rule) ovs_rule.update(table=TAP_CLASSIFIER_TABLE, 
priority=0, in_port=in_port, dl_src=vif_mac, actions="%s" % ''.join(subnet_actions_list) ) self.br_int.add_flow(**ovs_rule) if item['local_endpoint'] != self.local_ip: self._configure_tunnel_bridge_flows(flowrule, item, vif_mac) def _get_eth_type(self, flowrule, item, ovs_rule): # eth_type is decided based on current node's pp_corr and next node of # Tap node's pp_corr if flowrule['pp_corr'] == item.get('pp_corr_tap_nh') is None: ovs_rule.update(eth_type=constants.ETH_TYPE_IP) elif flowrule['pp_corr'] and not item.get('pp_corr_tap_nh'): ovs_rule.update(eth_type=constants.ETH_TYPE_IP) elif not flowrule['pp_corr'] and item.get('pp_corr_tap_nh'): if item['pp_corr_tap_nh'] == 'mpls': eth_type = constants.ETH_TYPE_MPLS mpls_label = flowrule['nsp'] << 8 | flowrule['nsi'] ovs_rule.update(mpls_label=mpls_label, eth_type=eth_type) else: if flowrule['pp_corr'] == 'mpls' or item.get( 'pp_corr_tap_nh') == 'mpls': ovs_rule.update(eth_type=constants.ETH_TYPE_MPLS) def _configure_tunnel_bridge_flows(self, flowrule, item, vif_mac): local_tunnel_ports = [port for port in self.br_tun.get_bridge_ports() if port != self.patch_int_ofport] match_info = {'in_port': self.patch_int_ofport, 'dl_src': vif_mac} if flowrule['pc_corr'] == 'mpls': self._build_classification_match_sfc_mpls(item, match_info) self.br_tun.add_flow( table=0, priority=30, actions="resubmit(,%s)" % TAP_TUNNEL_OUTPUT_TABLE, **match_info ) output_actions = "strip_vlan,load:%s->NXM_NX_TUN_ID[]" % ( hex(flowrule['segment_id'])) for port in local_tunnel_ports: output_actions += (",output:%d" % port) self.br_tun.add_flow( table=TAP_TUNNEL_OUTPUT_TABLE, priority=0, actions=output_actions, **match_info ) def _build_buckets(self, buckets, flowrule, item): if item.get('tap_enabled'): # Tap PPG doesn't use bucket as of now. 
return if flowrule['fwd_path']: bucket = ( 'bucket=weight=%d, mod_dl_dst:%s, resubmit(,%d)' % ( item['weight'], item['in_mac_address'], ACROSS_SUBNET_TABLE)) else: bucket = ( 'bucket=weight=%d, mod_dl_dst:%s, resubmit(,%d)' % ( item['weight'], item['mac_address'], ACROSS_SUBNET_TABLE)) buckets.append(bucket) def _update_enc_actions(self, enc_actions, flow_rule, next_hop_tap_enabled): # Add resubmit action to send to TAP table. if next_hop_tap_enabled: pp_corr = flow_rule['pp_corr'] pp_corr_tap_nh = flow_rule['next_hops'][0].get('pp_corr_tap_nh') tap_nh_node_type = flow_rule['next_hops'][0].get( 'tap_nh_node_type', constants.DST_NODE) group_action = enc_actions.split(',')[-1] enc_actions = "" if tap_nh_node_type == constants.SF_NODE: if not pp_corr and pp_corr_tap_nh: if flow_rule.get('pc_corr', 'mpls') == 'mpls': mpls_act = self._build_push_mpls(flow_rule['nsp'], flow_rule['nsi']) enc_actions += mpls_act # enc_actions += group_action enc_actions += group_action else: if flow_rule['pc_corr'] == 'mpls': # For DST Node if flow_rule['pp_corr']: enc_actions = ('pop_mpls:0x%04x,%s' % ( constants.ETH_TYPE_IP, NORMAL_ACTION)) else: enc_actions += NORMAL_ACTION return enc_actions + RESUBMIT_TAP_TABLE elif flow_rule.get('tap_enabled'): return enc_actions + RESUBMIT_TAP_TABLE return enc_actions def _delete_across_subnet_table_flows(self, flowrule): if not flowrule['next_hops']: return tap_enabled = flowrule['next_hops'][0].get('tap_enabled', False) if tap_enabled: egress_port = self.br_int.get_vif_port_by_id(flowrule['egress']) for item in flowrule['next_hops']: if flowrule['fwd_path']: self.br_int.delete_flows( table=TAP_CLASSIFIER_TABLE, dl_src=egress_port.vif_mac) else: self.br_int.delete_flows( table=TAP_CLASSIFIER_TABLE, dl_src=egress_port.vif_mac) if item['local_endpoint'] != self.local_ip: self._delete_tunnel_bridge_flows(flowrule, egress_port.vif_mac) else: for item in flowrule['next_hops']: if flowrule['fwd_path']: self.br_int.delete_flows( table=ACROSS_SUBNET_TABLE, 
dl_dst=item['in_mac_address']) else: self.br_int.delete_flows( table=ACROSS_SUBNET_TABLE, dl_dst=item['mac_address']) def _add_tap_ingress_flow(self, flowrule, vif_port, vlan): match_field = self._build_tap_ingress_match_field_sfc_mpls( flowrule, vif_port, vlan) actions = ('strip_vlan, pop_mpls:0x%04x,output:%s' % (constants.ETH_TYPE_MPLS, vif_port.ofport)) match_field['actions'] = actions match_field.pop('dl_dst', None) match_field.update(dl_src=flowrule['mac_address']) self.br_int.add_flow(**match_field) def _build_tap_ingress_match_field_sfc_mpls(self, flowrule, vif_port, vlan): match_field = self._build_ingress_common_match_field(vif_port, vlan) match_field['eth_type'] = constants.ETH_TYPE_MPLS match_field['mpls_label'] = flowrule['nsp'] << 8 | flowrule['nsi'] return match_field def _delete_tunnel_bridge_flows(self, flowrule, src_mac): match_info = {'in_port': self.patch_int_ofport, 'dl_src': src_mac} # Use Tap 'nsi' flowrule_copy = flowrule.copy() flowrule_copy['nsi'], flowrule_copy['nsp'] = ( flowrule['next_hops'][0]['nsi'], flowrule['next_hops'][0]['nsp']) self._build_classification_match_sfc_mpls(flowrule_copy, match_info) self.br_tun.delete_flows(table=0, **match_info) self.br_tun.delete_flows(table=TAP_TUNNEL_OUTPUT_TABLE, **match_info) def _clear_sfc_flow_on_tun_br(self): self.br_tun.delete_flows(table=0, eth_type=constants.ETH_TYPE_MPLS) self.br_tun.delete_flows(table=TAP_TUNNEL_OUTPUT_TABLE) def _delete_flows_nsh(self, flowrule, vif_port): self.br_int.delete_flows( table=INGRESS_TABLE, eth_type=constants.ETH_TYPE_NSH, dl_dst=vif_port.vif_mac, nsh_mdtype=1, nsh_spi=flowrule['nsp'], nsh_si=flowrule['nsi'] + 1 ) networking-sfc-10.0.0/networking_sfc/services/sfc/agent/extensions/openvswitch/__init__.py0000664000175000017500000000000013656750333032152 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/services/sfc/agent/extensions/sfc.py0000664000175000017500000001560413656750333026635 0ustar zuulzuul00000000000000# Copyright 2016 Red Hat, 
Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from neutron.agent import rpc as agent_rpc from neutron import manager from neutron_lib.agent import l2_extension from neutron_lib.agent import topics from neutron_lib import rpc as n_rpc from oslo_config import cfg from oslo_log import log as logging import oslo_messaging import six from networking_sfc.services.sfc.drivers.ovs import rpc_topics as sfc_topics LOG = logging.getLogger(__name__) class SfcPluginApi(object): def __init__(self, topic, host): self.host = host self.target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(self.target) def update_flowrules_status(self, context, flowrules_status): cctxt = self.client.prepare() return cctxt.call( context, 'update_flowrules_status', flowrules_status=flowrules_status) def get_flowrules_by_host_portid(self, context, port_id): cctxt = self.client.prepare() return cctxt.call( context, 'get_flowrules_by_host_portid', host=self.host, port_id=port_id) @six.add_metaclass(abc.ABCMeta) class SfcAgentDriver(object): """Defines stable abstract interface for SFC Agent Driver.""" @abc.abstractmethod def initialize(self): """Perform SFC agent driver initialization.""" def consume_api(self, agent_api): """Consume the AgentAPI instance from the SfcAgentExtension class :param agent_api: An instance of an agent specific API """ def update_flow_rules(self, flowrule, flowrule_status): """Update a flow rule in driver.""" def delete_flow_rule(self, 
flowrule, flowrule_status): """Delete a flow rule in driver.""" class SfcAgentExtension(l2_extension.L2AgentExtension): def initialize(self, connection, driver_type): """Initialize agent extension.""" self.sfc_driver = manager.NeutronManager.load_class_for_provider( 'networking_sfc.sfc.agent_drivers', driver_type)() self.sfc_driver.consume_api(self.agent_api) self.sfc_driver.initialize() self._sfc_setup_rpc() def consume_api(self, agent_api): """Receive neutron agent API object Allows an extension to gain access to resources internal to the neutron agent and otherwise unavailable to the extension. """ self.agent_api = agent_api def handle_port(self, context, port): """Handle agent SFC extension port add/update.""" port_id = port['port_id'] resync = False flowrule_status = [] try: LOG.debug("a new device %s is found", port_id) flows_list = ( self.sfc_plugin_rpc.get_flowrules_by_host_portid( context, port_id ) ) if flows_list: for flow in flows_list: self.sfc_driver.update_flow_rules( flow, flowrule_status) except Exception as e: LOG.exception(e) LOG.error("SFC L2 extension handle_port failed") resync = True if flowrule_status: self.sfc_plugin_rpc.update_flowrules_status( context, flowrule_status) return resync def delete_port(self, context, port): """Handle agent SFC extension port delete.""" port_id = port['port_id'] resync = False LOG.info("a device %s is removed", port_id) try: self._delete_ports_flowrules_by_id(context, port_id) except Exception as e: LOG.exception(e) LOG.error( "delete port flow rule failed for %(port_id)s", {'port_id': port_id} ) resync = True return resync def update_flow_rules(self, context, **kwargs): flowrule_status = [] try: flowrules = kwargs['flowrule_entries'] LOG.debug("update_flow_rules received, flowrules = %s", flowrules) if flowrules: self.sfc_driver.update_flow_rules( flowrules, flowrule_status) except Exception as e: LOG.exception(e) LOG.error("update_flow_rules failed") if flowrule_status: 
self.sfc_plugin_rpc.update_flowrules_status( context, flowrule_status) def delete_flow_rules(self, context, **kwargs): flowrule_status = [] try: flowrules = kwargs['flowrule_entries'] LOG.debug("delete_flow_rules received, flowrules= %s", flowrules) if flowrules: self.sfc_driver.delete_flow_rule( flowrules, flowrule_status) except Exception as e: LOG.exception(e) LOG.error("delete_flow_rules failed") if flowrule_status: self.sfc_plugin_rpc.update_flowrules_status( context, flowrule_status) def _sfc_setup_rpc(self): self.sfc_plugin_rpc = SfcPluginApi( sfc_topics.SFC_PLUGIN, cfg.CONF.host) self.topic = sfc_topics.SFC_AGENT self.endpoints = [self] consumers = [ [sfc_topics.PORTFLOW, topics.UPDATE], [sfc_topics.PORTFLOW, topics.DELETE] ] # subscribe sfc plugin message self.connection = agent_rpc.create_consumers( self.endpoints, self.topic, consumers) def _delete_ports_flowrules_by_id(self, context, ports_id): flowrule_status = [] try: LOG.debug("delete_port_id_flows received, ports_id= %s", ports_id) count = 0 if ports_id: for port_id in ports_id: flowrule = ( self.sfc_plugin_rpc.get_flowrules_by_host_portid( context, port_id ) ) if flowrule: self.sfc.driver.delete_flow_rule( flowrule, flowrule_status) LOG.debug( "_delete_ports_flowrules_by_id received, count= %s", count) except Exception as e: LOG.exception(e) LOG.error("delete_port_id_flows failed") if flowrule_status: self.sfc_plugin_rpc.update_flowrules_status( context, flowrule_status) networking-sfc-10.0.0/networking_sfc/services/sfc/plugin.py0000664000175000017500000003310413656750333024056 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from neutron_lib.db import api as db_api from networking_sfc.db import sfc_db from networking_sfc.extensions import servicegraph as sg_ext from networking_sfc.extensions import sfc as sfc_ext from networking_sfc.extensions import tap as tap_ext from networking_sfc.services.sfc.common import context as sfc_ctx from networking_sfc.services.sfc.common import exceptions as sfc_exc from networking_sfc.services.sfc import driver_manager as sfc_driver LOG = logging.getLogger(__name__) class SfcPlugin(sfc_db.SfcDbPlugin): """SFC plugin implementation.""" # REVISIT(vks1) This should be changed to string instead of importing # extensions explicitly. So that even if extensions increase in future, # imports do not. 
supported_extension_aliases = [sfc_ext.SFC_EXT, sg_ext.SG_EXT, tap_ext.TAP_EXT] path_prefix = sfc_ext.SFC_PREFIX def __init__(self): self.driver_manager = sfc_driver.SfcDriverManager() super(SfcPlugin, self).__init__() self.driver_manager.initialize() @log_helpers.log_method_call def create_port_chain(self, context, port_chain): with db_api.CONTEXT_WRITER.using(context): port_chain_db = super(SfcPlugin, self).create_port_chain( context, port_chain) portchain_db_context = sfc_ctx.PortChainContext( self, context, port_chain_db) self.driver_manager.create_port_chain_precommit( portchain_db_context) try: self.driver_manager.create_port_chain_postcommit( portchain_db_context) except sfc_exc.SfcDriverError as e: LOG.exception(e) with excutils.save_and_reraise_exception(): LOG.error("Create port chain failed, " "deleting port_chain '%s'", port_chain_db['id']) self.delete_port_chain(context, port_chain_db['id']) return port_chain_db @log_helpers.log_method_call def update_port_chain(self, context, portchain_id, port_chain): with db_api.CONTEXT_WRITER.using(context): original_portchain = self.get_port_chain(context, portchain_id) updated_portchain = super(SfcPlugin, self).update_port_chain( context, portchain_id, port_chain) portchain_db_context = sfc_ctx.PortChainContext( self, context, updated_portchain, original_portchain=original_portchain) self.driver_manager.update_port_chain_precommit( portchain_db_context) try: self.driver_manager.update_port_chain_postcommit( portchain_db_context) except sfc_exc.SfcDriverError as e: LOG.exception(e) with excutils.save_and_reraise_exception(): LOG.error("Update port chain failed, port_chain '%s'", updated_portchain['id']) # TODO(qijing): should we rollback the database update here? 
return updated_portchain @log_helpers.log_method_call def delete_port_chain(self, context, portchain_id): pc = self.get_port_chain(context, portchain_id) pc_context = sfc_ctx.PortChainContext(self, context, pc) try: self.driver_manager.delete_port_chain(pc_context) except sfc_exc.SfcDriverError as e: LOG.exception(e) with excutils.save_and_reraise_exception(): LOG.error("Delete port chain failed, portchain '%s'", portchain_id) # TODO(qijing): unsync in case deleted in driver but fail in database with db_api.CONTEXT_WRITER.using(context): pc = self.get_port_chain(context, portchain_id) pc_context = sfc_ctx.PortChainContext(self, context, pc) super(SfcPlugin, self).delete_port_chain(context, portchain_id) self.driver_manager.delete_port_chain_precommit(pc_context) self.driver_manager.delete_port_chain_postcommit(pc_context) @log_helpers.log_method_call def create_port_pair(self, context, port_pair): with db_api.CONTEXT_WRITER.using(context): portpair_db = super(SfcPlugin, self).create_port_pair( context, port_pair) portpair_context = sfc_ctx.PortPairContext( self, context, portpair_db) self.driver_manager.create_port_pair_precommit(portpair_context) try: self.driver_manager.create_port_pair_postcommit(portpair_context) except sfc_exc.SfcDriverError as e: LOG.exception(e) with excutils.save_and_reraise_exception(): LOG.error("Create port pair failed, " "deleting port_pair '%s'", portpair_db['id']) self.delete_port_pair(context, portpair_db['id']) return portpair_db @log_helpers.log_method_call def update_port_pair(self, context, portpair_id, port_pair): with db_api.CONTEXT_WRITER.using(context): original_portpair = self.get_port_pair(context, portpair_id) updated_portpair = super(SfcPlugin, self).update_port_pair( context, portpair_id, port_pair) portpair_context = sfc_ctx.PortPairContext( self, context, updated_portpair, original_portpair=original_portpair) self.driver_manager.update_port_pair_precommit(portpair_context) try: 
self.driver_manager.update_port_pair_postcommit(portpair_context) except sfc_exc.SfcDriverError as e: LOG.exception(e) with excutils.save_and_reraise_exception(): LOG.error("Update port pair failed, port_pair '%s'", updated_portpair['id']) return updated_portpair @log_helpers.log_method_call def delete_port_pair(self, context, portpair_id): portpair = self.get_port_pair(context, portpair_id) portpair_context = sfc_ctx.PortPairContext( self, context, portpair) try: self.driver_manager.delete_port_pair(portpair_context) except sfc_exc.SfcDriverError as e: LOG.exception(e) with excutils.save_and_reraise_exception(): LOG.error("Delete port pair failed, port_pair '%s'", portpair_id) with db_api.CONTEXT_WRITER.using(context): portpair = self.get_port_pair(context, portpair_id) portpair_context = sfc_ctx.PortPairContext( self, context, portpair) super(SfcPlugin, self).delete_port_pair(context, portpair_id) self.driver_manager.delete_port_pair_precommit(portpair_context) self.driver_manager.delete_port_pair_postcommit(portpair_context) @log_helpers.log_method_call def create_port_pair_group(self, context, port_pair_group): with db_api.CONTEXT_WRITER.using(context): portpairgroup_db = super(SfcPlugin, self).create_port_pair_group( context, port_pair_group) portpairgroup_context = sfc_ctx.PortPairGroupContext( self, context, portpairgroup_db) self.driver_manager.create_port_pair_group_precommit( portpairgroup_context) try: self.driver_manager.create_port_pair_group_postcommit( portpairgroup_context) except sfc_exc.SfcDriverError as e: LOG.exception(e) with excutils.save_and_reraise_exception(): LOG.error("Create port pair group failed, " "deleting port_pair_group '%s'", portpairgroup_db['id']) self.delete_port_pair_group(context, portpairgroup_db['id']) return portpairgroup_db @log_helpers.log_method_call def update_port_pair_group( self, context, portpairgroup_id, port_pair_group ): with db_api.CONTEXT_WRITER.using(context): original_portpairgroup = 
self.get_port_pair_group( context, portpairgroup_id) updated_portpairgroup = super( SfcPlugin, self).update_port_pair_group( context, portpairgroup_id, port_pair_group) portpairgroup_context = sfc_ctx.PortPairGroupContext( self, context, updated_portpairgroup, original_portpairgroup=original_portpairgroup) self.driver_manager.update_port_pair_group_precommit( portpairgroup_context) try: self.driver_manager.update_port_pair_group_postcommit( portpairgroup_context) except sfc_exc.SfcDriverError as e: LOG.exception(e) with excutils.save_and_reraise_exception(): LOG.error("Update port pair group failed, " "port_pair_group '%s'", updated_portpairgroup['id']) return updated_portpairgroup @log_helpers.log_method_call def delete_port_pair_group(self, context, portpairgroup_id): portpairgroup = self.get_port_pair_group(context, portpairgroup_id) portpairgroup_context = sfc_ctx.PortPairGroupContext( self, context, portpairgroup) try: self.driver_manager.delete_port_pair_group(portpairgroup_context) except sfc_exc.SfcDriverError as e: LOG.exception(e) with excutils.save_and_reraise_exception(): LOG.error("Delete port pair group failed, " "port_pair_group '%s'", portpairgroup_id) with db_api.CONTEXT_WRITER.using(context): portpairgroup = self.get_port_pair_group(context, portpairgroup_id) portpairgroup_context = sfc_ctx.PortPairGroupContext( self, context, portpairgroup) super(SfcPlugin, self).delete_port_pair_group(context, portpairgroup_id) self.driver_manager.delete_port_pair_group_precommit( portpairgroup_context) self.driver_manager.delete_port_pair_group_postcommit( portpairgroup_context) @log_helpers.log_method_call def create_service_graph(self, context, service_graph): with db_api.CONTEXT_WRITER.using(context): service_graph_db = super(SfcPlugin, self).create_service_graph( context, service_graph) service_graph_db_context = sfc_ctx.ServiceGraphContext( self, context, service_graph_db) self.driver_manager.create_service_graph_precommit( service_graph_db_context) try: 
self.driver_manager.create_service_graph_postcommit( service_graph_db_context) except sfc_exc.SfcDriverError as e: LOG.exception(e) with excutils.save_and_reraise_exception(): LOG.error("Create Service Graph failed, " "deleting Service Graph '%s'", service_graph_db['id']) self.delete_service_graph(context, service_graph_db['id']) return service_graph_db @log_helpers.log_method_call def update_service_graph(self, context, id, service_graph): with db_api.CONTEXT_WRITER.using(context): original_graph = self.get_service_graph(context, id) updated_graph = super(SfcPlugin, self).update_service_graph( context, id, service_graph) service_graph_db_context = sfc_ctx.ServiceGraphContext( self, context, updated_graph, original_graph=original_graph) self.driver_manager.update_service_graph_precommit( service_graph_db_context) try: self.driver_manager.update_service_graph_postcommit( service_graph_db_context) except sfc_exc.SfcDriverError: with excutils.save_and_reraise_exception(): LOG.error("Update failed, service_graph '%s'", updated_graph['id']) return updated_graph @log_helpers.log_method_call def delete_service_graph(self, context, id): graph = self.get_service_graph(context, id) graph_context = sfc_ctx.ServiceGraphContext(self, context, graph) with db_api.CONTEXT_WRITER.using(context): graph = self.get_service_graph(context, id) graph_context = sfc_ctx.ServiceGraphContext(self, context, graph) super(SfcPlugin, self).delete_service_graph(context, id) self.driver_manager.delete_service_graph_precommit(graph_context) try: self.driver_manager.delete_service_graph_postcommit(graph_context) except sfc_exc.SfcDriverError as e: LOG.exception(e) with excutils.save_and_reraise_exception(): LOG.error("Delete failed, service_graph '%s'", id) networking-sfc-10.0.0/networking_sfc/opts.py0000664000175000017500000000234113656750333021146 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the 
License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools

from networking_sfc.extensions import flowclassifier
from networking_sfc.extensions import servicegraph
from networking_sfc.extensions import sfc
from networking_sfc.services.flowclassifier.common import config as fc_config
from networking_sfc.services.sfc.common import config as sfc_config


def list_quota_opts():
    """Return the networking-sfc quota options.

    Shape is the (group, options-iterable) list expected by
    oslo-config-generator entry points; all quota options are merged
    into the single 'quotas' group.
    """
    return [
        ('quotas',
         itertools.chain(
             flowclassifier.flow_classifier_quota_opts,
             sfc.sfc_quota_opts,
             servicegraph.service_graph_quota_opts)
         ),
    ]


def list_sfc_opts():
    """Return the flow-classifier and SFC driver options.

    Same (group, options) shape as list_quota_opts, one entry per
    config group.
    """
    return [
        ('flowclassifier', fc_config.FLOWCLASSIFIER_DRIVER_OPTS),
        ('sfc', sfc_config.SFC_DRIVER_OPTS),
    ]
networking-sfc-10.0.0/networking_sfc/policies/0000775000175000017500000000000013656750461021420 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/policies/flow_classifier.py0000664000175000017500000000354713656750333025154 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy from networking_sfc.policies import base rules = [ policy.DocumentedRuleDefault( 'create_flow_classifier', base.RULE_ANY, 'Create a flow classifier', [ { 'method': 'POST', 'path': '/sfc/flow_classifiers', }, ] ), policy.DocumentedRuleDefault( 'update_flow_classifier', base.RULE_ADMIN_OR_OWNER, 'Update a flow classifier', [ { 'method': 'PUT', 'path': '/sfc/flow_classifiers/{id}', }, ] ), policy.DocumentedRuleDefault( 'delete_flow_classifier', base.RULE_ADMIN_OR_OWNER, 'Delete a flow classifier', [ { 'method': 'DELETE', 'path': '/sfc/flow_classifiers/{id}', }, ] ), policy.DocumentedRuleDefault( 'get_flow_classifier', base.RULE_ADMIN_OR_OWNER, 'Get flow classifiers', [ { 'method': 'GET', 'path': '/sfc/flow_classifiers', }, { 'method': 'GET', 'path': '/sfc/flow_classifiers/{id}', }, ] ), ] def list_rules(): return rules networking-sfc-10.0.0/networking_sfc/policies/__init__.py0000664000175000017500000000205513656750333023531 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools from networking_sfc.policies import flow_classifier from networking_sfc.policies import port_chain from networking_sfc.policies import port_pair from networking_sfc.policies import port_pair_group from networking_sfc.policies import service_graph def list_rules(): return itertools.chain( flow_classifier.list_rules(), port_chain.list_rules(), port_pair_group.list_rules(), port_pair.list_rules(), service_graph.list_rules(), ) networking-sfc-10.0.0/networking_sfc/policies/port_chain.py0000664000175000017500000000344613656750333024125 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from networking_sfc.policies import base rules = [ policy.DocumentedRuleDefault( 'create_port_chain', base.RULE_ANY, 'Create a port chain', [ { 'method': 'POST', 'path': '/sfc/port_chains', }, ] ), policy.DocumentedRuleDefault( 'update_port_chain', base.RULE_ADMIN_OR_OWNER, 'Update a port chain', [ { 'method': 'PUT', 'path': '/sfc/port_chains/{id}', }, ] ), policy.DocumentedRuleDefault( 'delete_port_chain', base.RULE_ADMIN_OR_OWNER, 'Delete a port chain', [ { 'method': 'DELETE', 'path': '/sfc/port_chains/{id}', }, ] ), policy.DocumentedRuleDefault( 'get_port_chain', base.RULE_ADMIN_OR_OWNER, 'Get port chains', [ { 'method': 'GET', 'path': '/sfc/port_chains', }, { 'method': 'GET', 'path': '/sfc/port_chains/{id}', }, ] ), ] def list_rules(): return rules networking-sfc-10.0.0/networking_sfc/policies/service_graph.py0000664000175000017500000000351513656750333024615 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy

from networking_sfc.policies import base

# API endpoints for service-graph resources.
_COLLECTION_PATH = '/sfc/service_graphs'
_RESOURCE_PATH = '/sfc/service_graphs/{id}'

rules = [
    policy.DocumentedRuleDefault(
        'create_service_graph', base.RULE_ANY,
        'Create a service graph',
        [{'method': 'POST', 'path': _COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'update_service_graph', base.RULE_ADMIN_OR_OWNER,
        'Update a service graph',
        [{'method': 'PUT', 'path': _RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_service_graph', base.RULE_ADMIN_OR_OWNER,
        'Delete a service graph',
        [{'method': 'DELETE', 'path': _RESOURCE_PATH}]),
    # GET is documented for both the collection and a single resource.
    policy.DocumentedRuleDefault(
        'get_service_graph', base.RULE_ADMIN_OR_OWNER,
        'Get service graphs',
        [{'method': 'GET', 'path': _COLLECTION_PATH},
         {'method': 'GET', 'path': _RESOURCE_PATH}]),
]


def list_rules():
    """Return the service-graph policy defaults for registration."""
    return rules

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy

from networking_sfc.policies import base


def _doc_rule(name, default, description, operations):
    """Build a DocumentedRuleDefault from (HTTP method, path) pairs."""
    return policy.DocumentedRuleDefault(
        name, default, description,
        [{'method': method, 'path': path} for method, path in operations])


rules = [
    _doc_rule('create_port_pair_group', base.RULE_ANY,
              'Create a port pair group',
              [('POST', '/sfc/port_pair_groups')]),
    _doc_rule('update_port_pair_group', base.RULE_ADMIN_OR_OWNER,
              'Update a port pair group',
              [('PUT', '/sfc/port_pair_groups/{id}')]),
    _doc_rule('delete_port_pair_group', base.RULE_ADMIN_OR_OWNER,
              'Delete a port pair group',
              [('DELETE', '/sfc/port_pair_groups/{id}')]),
    # GET covers both the collection and a single resource.
    _doc_rule('get_port_pair_group', base.RULE_ADMIN_OR_OWNER,
              'Get port pair groups',
              [('GET', '/sfc/port_pair_groups'),
               ('GET', '/sfc/port_pair_groups/{id}')]),
]


def list_rules():
    """Return the port-pair-group policy defaults for registration."""
    return rules

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy

from networking_sfc.policies import base

# All mutating/read operations except create default to owner-or-admin.
_OWNER_OR_ADMIN = base.RULE_ADMIN_OR_OWNER

rules = [
    policy.DocumentedRuleDefault(
        'create_port_pair', base.RULE_ANY, 'Create a port pair',
        [{'method': 'POST', 'path': '/sfc/port_pairs'}]),
    policy.DocumentedRuleDefault(
        'update_port_pair', _OWNER_OR_ADMIN, 'Update a port pair',
        [{'method': 'PUT', 'path': '/sfc/port_pairs/{id}'}]),
    policy.DocumentedRuleDefault(
        'delete_port_pair', _OWNER_OR_ADMIN, 'Delete a port pair',
        [{'method': 'DELETE', 'path': '/sfc/port_pairs/{id}'}]),
    policy.DocumentedRuleDefault(
        'get_port_pair', _OWNER_OR_ADMIN, 'Get port pairs',
        [{'method': 'GET', 'path': '/sfc/port_pairs'},
         {'method': 'GET', 'path': '/sfc/port_pairs/{id}'}]),
]


def list_rules():
    """Return the port-pair policy defaults for registration."""
    return rules

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# TODO(amotoki): Define these in neutron or neutron-lib RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner' RULE_ADMIN_ONLY = 'rule:admin_only' RULE_ANY = 'rule:regular_user' networking-sfc-10.0.0/networking_sfc/tests/0000775000175000017500000000000013656750461020753 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/__init__.py0000664000175000017500000000000013656750333023050 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/0000775000175000017500000000000013656750461021732 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/__init__.py0000664000175000017500000000000013656750333024027 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/cli/0000775000175000017500000000000013656750461022501 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/cli/__init__.py0000664000175000017500000000000013656750333024576 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/cli/test_flow_classifier.py0000664000175000017500000001773013656750333027273 0ustar zuulzuul00000000000000# Copyright 2015 Huawei Technologies India Pvt. Ltd. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
#

import sys
from unittest import mock

from neutronclient import shell
from neutronclient.tests.unit import test_cli20

from networking_sfc.cli import flow_classifier as fc

from oslo_utils import uuidutils

# Random UUIDs standing in for Neutron port identifiers in the CLI args.
source_port_UUID = uuidutils.generate_uuid()
destination_port_UUID = uuidutils.generate_uuid()


class CLITestV20FCExtensionJSON(test_cli20.CLITestV20Base):
    """CLI tests for the flow-classifier neutronclient extension."""

    def setUp(self):
        super(CLITestV20FCExtensionJSON, self).setUp()
        self._mock_extension_loading()
        self.register_non_admin_status_resource('flow_classifier')

    def _create_patch(self, name, func=None):
        # Start a mock.patch on *name* and register its stop for teardown.
        patcher = mock.patch(name)
        thing = patcher.start()
        self.addCleanup(patcher.stop)
        return thing

    def _mock_extension_loading(self):
        # Make entry-point discovery return only the flow-classifier
        # extension module so the shell loads just these commands.
        ext_pkg = 'neutronclient.common.extension'
        flow_classifier = self._create_patch(
            ext_pkg + '._discover_via_entry_points')
        flow_classifier.return_value = [("flow_classifier", fc)]
        return flow_classifier

    def test_ext_cmd_loaded(self):
        # Every flow-classifier command must resolve to its class.
        neutron_shell = shell.NeutronShell('2.0')
        ext_cmd = {'flow-classifier-list': fc.FlowClassifierList,
                   'flow-classifier-create': fc.FlowClassifierCreate,
                   'flow-classifier-update': fc.FlowClassifierUpdate,
                   'flow-classifier-delete': fc.FlowClassifierDelete,
                   'flow-classifier-show': fc.FlowClassifierShow}
        for cmd_name, cmd_class in ext_cmd.items():
            found = neutron_shell.command_manager.find_command([cmd_name])
            self.assertEqual(cmd_class, found[0])

    def test_create_flow_classifier_with_mandatory_params(self):
        """create flow-classifier: flow1."""
        resource = 'flow_classifier'
        cmd = fc.FlowClassifierCreate(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        name = 'flow1'
        ethertype = 'IPv4'
        args = [
            name,
            '--ethertype', ethertype,
        ]
        position_names = ['name', 'ethertype']
        position_values = [name, ethertype]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_flow_classifier_with_all_params(self):
        """create flow-classifier: flow1."""
        resource = 'flow_classifier'
        cmd = fc.FlowClassifierCreate(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        name = 'flow1'
        protocol_name = 'TCP'
        ethertype = 'IPv4'
        # 'min:max' CLI ranges are expected to be split into the
        # *_range_min / *_range_max attributes on the resource.
        source_port = '0:65535'
        source_port_min = 0
        source_port_max = 65535
        destination_port = '1:65534'
        destination_port_min = 1
        destination_port_max = 65534
        source_ip = '192.168.1.0/24'
        destination_ip = '192.168.2.0/24'
        logical_source_port = '4a334cd4-fe9c-4fae-af4b-321c5e2eb051'
        logical_destination_port = '1278dcd4-459f-62ed-754b-87fc5e4a6751'
        description = 'my-desc'
        # 'key=value' L7 parameters are parsed into a dict.
        l7_param = "url=my_url"
        l7_param_expected = {"url": "my_url"}
        args = [name,
                '--protocol', protocol_name,
                '--ethertype', ethertype,
                '--source-port', source_port,
                '--destination-port', destination_port,
                '--source-ip-prefix', source_ip,
                '--destination-ip-prefix', destination_ip,
                '--logical-source-port', logical_source_port,
                '--logical-destination-port', logical_destination_port,
                '--description', description,
                '--l7-parameters', l7_param]
        position_names = ['name', 'protocol', 'ethertype',
                          'source_port_range_min', 'source_port_range_max',
                          'destination_port_range_min',
                          'destination_port_range_max',
                          'source_ip_prefix', 'destination_ip_prefix',
                          'logical_source_port', 'logical_destination_port',
                          'description', 'l7_parameters']
        position_values = [name, protocol_name, ethertype,
                           source_port_min, source_port_max,
                           destination_port_min, destination_port_max,
                           source_ip, destination_ip,
                           logical_source_port, logical_destination_port,
                           description, l7_param_expected]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_list_flow_classifier(self):
        """List available flow-classifiers."""
        resources = "flow_classifiers"
        cmd = fc.FlowClassifierList(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd, True)

    def test_list_flow_classifier_sort(self):
        """flow_classifier-list --sort-key name --sort-key id --sort-key asc
        --sort-key desc
        """
        resources = "flow_classifiers"
        cmd = fc.FlowClassifierList(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd,
                                  sort_key=["name", "id"],
                                  sort_dir=["asc", "desc"])

    def test_list_flow_classifier_limit(self):
        """flow-classifier-list -P."""
        resources = "flow_classifiers"
        cmd = fc.FlowClassifierList(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd, page_size=1000)

    def test_show_flow_classifier_id(self):
        """flow-classifier-show test_id."""
        resource = 'flow_classifier'
        cmd = fc.FlowClassifierShow(test_cli20.MyApp(sys.stdout), None)
        args = ['--fields', 'id', self.test_id]
        self._test_show_resource(resource, cmd, self.test_id, args, ['id'])

    def test_show_flow_classifier_id_name(self):
        """flow-classifier-show ."""
        resource = 'flow_classifier'
        cmd = fc.FlowClassifierShow(test_cli20.MyApp(sys.stdout), None)
        args = ['--fields', 'id', '--fields', 'name', self.test_id]
        self._test_show_resource(resource, cmd, self.test_id,
                                 args, ['id', 'name'])

    def test_update_flow_classifier_description(self):
        """flow-classifier-update myid --description mydesc."""
        resource = 'flow_classifier'
        cmd = fc.FlowClassifierUpdate(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        # When --description is repeated, the last value wins.
        args = [myid, '--description', 'flow_classifier1',
                '--description', 'flow_classifier2']
        updatefields = {'description': 'flow_classifier2'}
        self._test_update_resource(resource, cmd, myid, args, updatefields)

    def test_update_flow_classifier_name(self):
        """flow-classifier-update myid --name myname."""
        resource = 'flow_classifier'
        cmd = fc.FlowClassifierUpdate(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', 'myname'],
                                   {'name': 'myname'})

    def test_delete_flow_classifer(self):
        """flow-classifier-delete my-id."""
        resource = 'flow_classifier'
        cmd = fc.FlowClassifierDelete(test_cli20.MyApp(sys.stdout), None)
        my_id = 'myid1'
        args = [my_id]
        self._test_delete_resource(resource, cmd, my_id, args)
# Copyright 2015 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import sys
from unittest import mock

from neutronclient import shell
from neutronclient.tests.unit import test_cli20

from networking_sfc.cli import port_pair as pp

from oslo_utils import uuidutils

# Random UUIDs standing in for the ingress/egress Neutron ports.
ingress_port_UUID = uuidutils.generate_uuid()
egress_port_UUID = uuidutils.generate_uuid()


class CLITestV20PortPairExtensionJSON(test_cli20.CLITestV20Base):
    """CLI tests for the port-pair neutronclient extension."""

    def setUp(self):
        super(CLITestV20PortPairExtensionJSON, self).setUp()
        self._mock_extension_loading()
        self.register_non_admin_status_resource('port_pair')

    def _create_patch(self, name, func=None):
        # Start a mock.patch on *name* and register its stop for teardown.
        patcher = mock.patch(name)
        thing = patcher.start()
        self.addCleanup(patcher.stop)
        return thing

    def _mock_extension_loading(self):
        # Entry-point discovery yields only the port-pair extension.
        ext_pkg = 'neutronclient.common.extension'
        port_pair = self._create_patch(ext_pkg +
                                       '._discover_via_entry_points')
        port_pair.return_value = [("port_pair", pp)]
        return port_pair

    def test_ext_cmd_loaded(self):
        # Every port-pair command must resolve to its class.
        neutron_shell = shell.NeutronShell('2.0')
        ext_cmd = {'port-pair-list': pp.PortPairList,
                   'port-pair-create': pp.PortPairCreate,
                   'port-pair-update': pp.PortPairUpdate,
                   'port-pair-delete': pp.PortPairDelete,
                   'port-pair-show': pp.PortPairShow}
        for cmd_name, cmd_class in ext_cmd.items():
            found = neutron_shell.command_manager.find_command([cmd_name])
            self.assertEqual(cmd_class, found[0])

    def test_create_port_pair_with_mandatory_param(self):
        """Create port_pair: myname."""
        resource = 'port_pair'
        cmd = pp.PortPairCreate(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = [name, '--ingress', ingress_port_UUID,
                '--egress', egress_port_UUID]
        position_names = ['name', 'ingress', 'egress']
        position_values = [name, ingress_port_UUID, egress_port_UUID]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_port_pair_with_bidirectional_port(self):
        """Create port_pair: myname with bidirectional port."""
        resource = 'port_pair'
        cmd = pp.PortPairCreate(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        # Same UUID for ingress and egress: one port used both ways.
        args = [name, '--ingress', ingress_port_UUID,
                '--egress', ingress_port_UUID]
        position_names = ['name', 'ingress', 'egress']
        position_values = [name, ingress_port_UUID, ingress_port_UUID]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_port_pair_with_all_param(self):
        """Create port_pair: myname with all parameter"""
        resource = 'port_pair'
        cmd = pp.PortPairCreate(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        desc = "my_port_pair"
        # 'key=value,...' service-function parameters parse into a dict
        # of string values.
        service_fn_param = 'correlation=None,weight=2'
        service_fn_param_exp = {"correlation": "None", "weight": "2"}
        args = [name, '--ingress', ingress_port_UUID,
                '--egress', egress_port_UUID,
                '--description', desc,
                '--service-function-parameters', service_fn_param]
        position_names = ['name', 'ingress', 'egress', 'description',
                          'service_function_parameters']
        position_values = [name, ingress_port_UUID, egress_port_UUID, desc,
                           service_fn_param_exp]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_update_port_pair_description(self):
        """Update port_pair: myid --description mydesc."""
        resource = 'port_pair'
        desc1 = "My_New_Port_Pair"
        cmd = pp.PortPairUpdate(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--description', desc1],
                                   {'description': desc1})

    def test_update_port_pair_name(self):
        """Update port_pair: myid --name myname."""
        resource = 'port_pair'
        my_name = "My_New_Port_Pair"
        cmd = pp.PortPairUpdate(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', my_name],
                                   {'name': my_name})

    def test_delete_port_pair(self):
        """Delete port-pair: myid."""
        resource = 'port_pair'
        cmd = pp.PortPairDelete(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        args = [myid]
        self._test_delete_resource(resource, cmd, myid, args)

    def test_list_port_pair(self):
        """List port_pairs."""
        resources = 'port_pairs'
        cmd = pp.PortPairList(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd, True)

    def test_list_port_pair_limit(self):
        """size (1000) limited list: port-pair -P."""
        resources = "port_pairs"
        cmd = pp.PortPairList(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd, page_size=1000)

    def test_list_port_pairs_sort(self):
        """List port_pairs: --sort-key name --sort-key id --sort-key asc
        --sort-key desc
        """
        resources = "port_pairs"
        cmd = pp.PortPairList(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd,
                                  sort_key=["name", "id"],
                                  sort_dir=["asc", "desc"])

    def test_show_port_pair(self):
        """Show port-pairs: --fields id --fields name myid."""
        resource = 'port_pair'
        cmd = pp.PortPairShow(test_cli20.MyApp(sys.stdout), None)
        args = ['--fields', 'id', '--fields', 'name', self.test_id]
        self._test_show_resource(resource, cmd, self.test_id,
                                 args, ['id', 'name'])

# Copyright 2015 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import sys
from unittest import mock

from neutronclient import shell
from neutronclient.tests.unit import test_cli20

from networking_sfc.cli import port_pair_group as pg

from oslo_utils import uuidutils

# Random UUIDs standing in for port-pair resources.
pp1 = uuidutils.generate_uuid()
pp2 = uuidutils.generate_uuid()
pp3 = uuidutils.generate_uuid()
pp4 = uuidutils.generate_uuid()


class CLITestV20PortGroupExtensionJSON(test_cli20.CLITestV20Base):
    """CLI tests for the port-pair-group neutronclient extension."""

    def setUp(self):
        super(CLITestV20PortGroupExtensionJSON, self).setUp()
        self._mock_extension_loading()
        self.register_non_admin_status_resource('port_pair_group')

    def _create_patch(self, name, func=None):
        # Start a mock.patch on *name* and register its stop for teardown.
        patcher = mock.patch(name)
        thing = patcher.start()
        self.addCleanup(patcher.stop)
        return thing

    def _mock_extension_loading(self):
        # Entry-point discovery yields only the port-pair-group extension.
        ext_pkg = 'neutronclient.common.extension'
        port_pair_group = self._create_patch(
            ext_pkg + '._discover_via_entry_points')
        port_pair_group.return_value = [("port_pair_group", pg)]
        return port_pair_group

    def test_ext_cmd_loaded(self):
        # Every port-pair-group command must resolve to its class.
        neutron_shell = shell.NeutronShell('2.0')
        ext_cmd = {'port-pair-group-list': pg.PortPairGroupList,
                   'port-pair-group-create': pg.PortPairGroupCreate,
                   'port-pair-group-update': pg.PortPairGroupUpdate,
                   'port-pair-group-delete': pg.PortPairGroupDelete,
                   'port-pair-group-show': pg.PortPairGroupShow}
        for cmd_name, cmd_class in ext_cmd.items():
            found = neutron_shell.command_manager.find_command([cmd_name])
            self.assertEqual(cmd_class, found[0])

    def test_create_port_pair_group_with_mandatory_args(self):
        """Create port_pair_group: myname."""
        resource = 'port_pair_group'
        cmd = pg.PortPairGroupCreate(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = [name, '--port-pair', pp1]
        position_names = ['name', 'port_pairs']
        position_values = [name, [pp1]]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_port_pair_group_with_multi_port_pairs(self):
        """Create port_pair_group: myname with multiple port pairs"""
        resource = 'port_pair_group'
        cmd = pg.PortPairGroupCreate(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        # Repeated --port-pair options accumulate into a list.
        args = [name, '--port-pair', pp1, '--port-pair', pp2]
        position_names = ['name', 'port_pairs']
        position_values = [name, [pp1, pp2]]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_port_pair_group_with_lb_fields_param(self):
        """Create port_pair_group: myname with lb_fields parameter"""
        resource = 'port_pair_group'
        cmd = pg.PortPairGroupCreate(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        # '&'-separated lb_fields values parse into a list.
        ppg_param = 'lb_fields=ip_src&ip_dst'
        ppg_exp = {"lb_fields": ["ip_src", "ip_dst"]}
        args = [name, '--port-pair', pp1,
                '--port-pair-group-parameters', ppg_param]
        position_names = ['name', 'port_pairs',
                          'port_pair_group_parameters']
        position_values = [name, [pp1], ppg_exp]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_port_pair_group_with_ppg_n_tuple_mapping_param(self):
        """Create port_pair_group: myname with ppg_n_tuple_mapping parameter"""
        resource = 'port_pair_group'
        cmd = pg.PortPairGroupCreate(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        # The *_ingress/*_egress suffixes map into the ingress_n_tuple /
        # egress_n_tuple sub-dicts.
        ppg_param = ('ppg_n_tuple_mapping=source_ip_prefix_ingress=None'
                     '&source_ip_prefix_egress=None')
        ppg_exp = {
            'ppg_n_tuple_mapping': {
                'ingress_n_tuple': {'source_ip_prefix': 'None'},
                'egress_n_tuple': {'source_ip_prefix': 'None'}}}
        args = [name, '--port-pair', pp1,
                '--port-pair-group-parameters', ppg_param]
        position_names = ['name', 'port_pairs',
                          'port_pair_group_parameters']
        position_values = [name, [pp1], ppg_exp]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_delete_port_pair_group(self):
        """Delete port_pair_group: myid."""
        resource = 'port_pair_group'
        cmd = pg.PortPairGroupDelete(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        args = [myid]
        self._test_delete_resource(resource, cmd, myid, args)

    def test_update_port_group_only_port_pair(self):
        """Update port_pair_group"""
        resource = 'port_pair_group'
        cmd = pg.PortPairGroupUpdate(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        args = [myid, '--port-pair', pp1, '--port-pair', pp2]
        updatefields = {'port_pairs': [pp1, pp2]}
        self._test_update_resource(resource, cmd, myid, args, updatefields)

    def test_update_port_group_with_all_desc(self):
        """Update port_pair_group and description"""
        resource = 'port_pair_group'
        cmd = pg.PortPairGroupUpdate(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        args = [myid, '--port-pair', pp1, '--port-pair', pp2,
                '--description', 'my_port_pair_group']
        updatefields = {'port_pairs': [pp1, pp2],
                        'description': 'my_port_pair_group'}
        self._test_update_resource(resource, cmd, myid, args, updatefields)

    def test_list_port_pair_group(self):
        """List port_pair_group."""
        resources = 'port_pair_groups'
        cmd = pg.PortPairGroupList(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd, True)

    def test_list_port_pair_group_limit(self):
        """size (1000) limited list: port-pair-group -P."""
        resources = "port_pair_groups"
        cmd = pg.PortPairGroupList(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd, page_size=1000)

    def test_list_port_group_sort(self):
        """List port_pair_group: --sort-key name --sort-key id --sort-key asc
        --sort-key desc
        """
        resources = "port_pair_groups"
        cmd = pg.PortPairGroupList(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd,
                                  sort_key=["name", "id"],
                                  sort_dir=["asc", "desc"])

    def test_show_port_group(self):
        """Show port-chain: --fields id --fields name myid."""
        resource = 'port_pair_group'
        cmd = pg.PortPairGroupShow(test_cli20.MyApp(sys.stdout), None)
        args = ['--fields', 'id', '--fields', 'name', self.test_id]
        self._test_show_resource(resource, cmd, self.test_id,
                                 args, ['id', 'name'])

# Copyright 2015 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import sys
from unittest import mock

from neutronclient import shell
from neutronclient.tests.unit import test_cli20

from networking_sfc.cli import port_chain as pc

from oslo_utils import uuidutils

# Random UUIDs standing in for port-pair-group, flow-classifier and
# parameter resources.
FAKE_port_pair_group1_UUID = uuidutils.generate_uuid()
FAKE_port_pair_group2_UUID = uuidutils.generate_uuid()
FAKE_FC1_UUID = uuidutils.generate_uuid()
FAKE_FC2_UUID = uuidutils.generate_uuid()
FAKE_PARAM1_UUID = uuidutils.generate_uuid()
FAKE_PARAM2_UUID = uuidutils.generate_uuid()


class CLITestV20PortChainExtensionJSON(test_cli20.CLITestV20Base):
    """CLI tests for the port-chain neutronclient extension."""

    def setUp(self):
        super(CLITestV20PortChainExtensionJSON, self).setUp()
        self._mock_extension_loading()
        self.register_non_admin_status_resource('port_chain')

    def _create_patch(self, name, func=None):
        # Start a mock.patch on *name* and register its stop for teardown.
        patcher = mock.patch(name)
        thing = patcher.start()
        self.addCleanup(patcher.stop)
        return thing

    def _mock_extension_loading(self):
        # Entry-point discovery yields only the port-chain extension.
        ext_pkg = 'neutronclient.common.extension'
        port_chain = self._create_patch(ext_pkg +
                                        '._discover_via_entry_points')
        port_chain.return_value = [("port_chain", pc)]
        return port_chain

    def test_ext_cmd_loaded(self):
        # Every port-chain command must resolve to its class.
        neutron_shell = shell.NeutronShell('2.0')
        ext_cmd = {'port-chain-list': pc.PortChainList,
                   'port-chain-create': pc.PortChainCreate,
                   'port-chain-update': pc.PortChainUpdate,
                   'port-chain-delete': pc.PortChainDelete,
                   'port-chain-show': pc.PortChainShow}
        for cmd_name, cmd_class in ext_cmd.items():
            found = neutron_shell.command_manager.find_command([cmd_name])
            self.assertEqual(cmd_class, found[0])

    def test_create_port_chain_with_mandatory_param(self):
        """Create port_chain: myname."""
        resource = 'port_chain'
        cmd = pc.PortChainCreate(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = [name, '--port-pair-group', FAKE_port_pair_group1_UUID]
        position_names = ['name', 'port_pair_groups']
        position_values = [name, [FAKE_port_pair_group1_UUID]]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_port_chain_with_multiple_port_pair_group(self):
        """Create port_chain: myname."""
        resource = 'port_chain'
        cmd = pc.PortChainCreate(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        # Repeated --port-pair-group options accumulate into a list.
        args = [name, '--port-pair-group', FAKE_port_pair_group1_UUID,
                '--port-pair-group', FAKE_port_pair_group2_UUID]
        position_names = ['name', 'port_pair_groups']
        position_values = [name, [FAKE_port_pair_group1_UUID,
                                  FAKE_port_pair_group2_UUID]]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_port_chain_with_all_params(self):
        """Create port_chain: myname."""
        resource = 'port_chain'
        cmd = pc.PortChainCreate(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        desc = 'check port chain cli'
        # 'key=value' chain parameters parse into a dict of strings.
        chain_parameter = "correlation=mpls"
        chain_parameter_expected = {"correlation": "mpls"}
        args = [name, '--description', desc,
                '--port-pair-group', FAKE_port_pair_group1_UUID,
                '--flow-classifier', FAKE_FC1_UUID,
                '--chain-parameters', chain_parameter]
        position_names = ['name', 'description', 'port_pair_groups',
                          'flow_classifiers', 'chain_parameters']
        position_values = [name, desc, [FAKE_port_pair_group1_UUID],
                           [FAKE_FC1_UUID], chain_parameter_expected]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_port_chain_with_single_classifier(self):
        """Create port_chain: myname."""
        resource = 'port_chain'
        cmd = pc.PortChainCreate(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = [name, '--port-pair-group', FAKE_port_pair_group1_UUID,
                '--flow-classifier', FAKE_FC1_UUID]
        position_names = ['name', 'port_pair_groups', 'flow_classifiers']
        position_values = [name, [FAKE_port_pair_group1_UUID],
                           [FAKE_FC1_UUID]]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_port_chain_with_chain_parameters(self):
        """Create port_chain: myname."""
        resource = 'port_chain'
        cmd = pc.PortChainCreate(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = [name, '--port-pair-group', FAKE_port_pair_group1_UUID,
                '--chain-parameters', 'symmetric=True']
        position_names = ['name', 'port_pair_groups', 'chain_parameters']
        position_values = [name, [FAKE_port_pair_group1_UUID],
                           {'symmetric': 'True'}]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_port_chain_with_multiple_classifiers(self):
        """Create port_chain: myname."""
        resource = 'port_chain'
        cmd = pc.PortChainCreate(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = [name, '--port-pair-group', FAKE_port_pair_group1_UUID,
                '--flow-classifier', FAKE_FC1_UUID,
                '--flow-classifier', FAKE_FC2_UUID]
        position_names = ['name', 'port_pair_groups', 'flow_classifiers']
        position_values = [name, [FAKE_port_pair_group1_UUID],
                           [FAKE_FC1_UUID, FAKE_FC2_UUID]]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_update_port_chain(self):
        """Update port_chain: myid --name myname."""
        resource = 'port_chain'
        cmd = pc.PortChainUpdate(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', 'myname'],
                                   {'name': 'myname'})

    def test_update_port_chain_with_no_flow_classifier(self):
        """Update port_chain: myid --name myname --no-flow-classifier None."""
        resource = 'port_chain'
        cmd = pc.PortChainUpdate(test_cli20.MyApp(sys.stdout), None)
        # --no-flow-classifier clears the classifier list.
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', 'myname',
                                    '--no-flow-classifier'],
                                   {'name': 'myname',
                                    'flow_classifiers': []})

    def test_update_port_chain_with_single_port_pair_group(self):
        """Update port_chain: myid --name myname --port-pair-group uuid."""
        resource = 'port_chain'
        cmd = pc.PortChainUpdate(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', 'myname',
                                    '--port-pair-group',
                                    FAKE_port_pair_group1_UUID,
                                    '--no-flow-classifier'],
                                   {'name': 'myname',
                                    'port_pair_groups': [
                                        FAKE_port_pair_group1_UUID],
                                    'flow_classifiers': []})

    def test_update_port_chain_with_multi_port_pair_groups(self):
        """Update port_chain: myid --name myname --port-pair-group uuid ..."""
        resource = 'port_chain'
        cmd = pc.PortChainUpdate(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', 'myname',
                                    '--port-pair-group',
                                    FAKE_port_pair_group1_UUID,
                                    '--port-pair-group',
                                    FAKE_port_pair_group2_UUID,
                                    '--no-flow-classifier'],
                                   {'name': 'myname',
                                    'port_pair_groups': [
                                        FAKE_port_pair_group1_UUID,
                                        FAKE_port_pair_group2_UUID],
                                    'flow_classifiers': []})

    def test_update_port_chain_with_single_classifier(self):
        """Update port_chain: myid --name myname --flow-classifier uuid."""
        resource = 'port_chain'
        cmd = pc.PortChainUpdate(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', 'myname',
                                    '--flow-classifier', FAKE_FC1_UUID],
                                   {'name': 'myname',
                                    'flow_classifiers': [FAKE_FC1_UUID]})

    def test_update_port_chain_with_multi_classifiers(self):
        """Update port_chain: myid --name myname --flow-classifier uuid ..."""
        resource = 'port_chain'
        cmd = pc.PortChainUpdate(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', 'myname',
                                    '--flow-classifier', FAKE_FC1_UUID,
                                    '--flow-classifier', FAKE_FC2_UUID],
                                   {'name': 'myname',
                                    'flow_classifiers': [
                                        FAKE_FC1_UUID, FAKE_FC2_UUID]})

    def test_update_port_chain_with_port_pair_group_classifier(self):
        """Update port_chain."""
        resource = 'port_chain'
        cmd = pc.PortChainUpdate(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', 'myname',
                                    '--flow-classifier', FAKE_FC1_UUID,
                                    '--port-pair-group',
                                    FAKE_port_pair_group1_UUID],
                                   {'name': 'myname',
                                    'flow_classifiers': [FAKE_FC1_UUID],
                                    'port_pair_groups': [
                                        FAKE_port_pair_group1_UUID]})

    def test_delete_port_chain(self):
        """Delete port-chain: myid."""
        resource = 'port_chain'
        cmd = pc.PortChainDelete(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        args = [myid]
        self._test_delete_resource(resource, cmd, myid, args)

    def test_list_port_chain(self):
        """List port_chain."""
        resources = 'port_chains'
        cmd = pc.PortChainList(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd, True)

    def test_list_port_chains_sort(self):
        """List port_chains: --sort-key name --sort-key id --sort-key asc
        --sort-key desc
        """
        resources = "port_chains"
        cmd = pc.PortChainList(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd,
                                  sort_key=["name", "id"],
                                  sort_dir=["asc", "desc"])

    def test_show_port_chain(self):
        """Show port-chain: --fields id --fields name myid."""
        resource = 'port_chain'
        cmd = pc.PortChainShow(test_cli20.MyApp(sys.stdout), None)
        args = ['--fields', 'id', '--fields', 'name', self.test_id]
        self._test_show_resource(resource, cmd, self.test_id, args,
                                 ['id', 'name'])
networking-sfc-10.0.0/networking_sfc/tests/unit/services/0000775000175000017500000000000013656750461023555 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/__init__.py0000664000175000017500000000000013656750333025652 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/flowclassifier/0000775000175000017500000000000013656750461026571 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/flowclassifier/test_driver_manager.py0000664000175000017500000001132513656750333033167 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from mock import Mock from stevedore.extension import Extension from neutron.tests import base from networking_sfc.services.flowclassifier.common import exceptions as fc_exc from networking_sfc.services.flowclassifier.driver_manager \ import FlowClassifierDriverManager class DriverManagerTestCase(base.BaseTestCase): def setUp(self): super(DriverManagerTestCase, self).setUp() def test_initialize_called(self): driver1 = Extension('mock_driver1', Mock(), None, Mock(native_bulk_support=True)) driver2 = Extension('mock_driver2', Mock(), None, Mock(native_bulk_support=True)) manager = FlowClassifierDriverManager.make_test_instance([driver1, driver2]) manager.initialize() driver1.obj.initialize.assert_called_once_with() driver2.obj.initialize.assert_called_once_with() def _test_method_called(self, method_name): driver1 = Extension('mock_driver1', Mock(), None, Mock(native_bulk_support=True)) driver2 = Extension('mock_driver2', Mock(), None, Mock(native_bulk_support=True)) manager = FlowClassifierDriverManager.make_test_instance([driver1, driver2]) mocked_context = Mock() getattr(manager, method_name)(mocked_context) getattr(driver1.obj, method_name).assert_called_once_with( mocked_context) getattr(driver2.obj, method_name).assert_called_once_with( mocked_context) def _test_method_exception(self, method_name, expected_exc=fc_exc.FlowClassifierDriverError): driver = Extension('mock_driver', Mock(), None, Mock(native_bulk_support=True)) mock_method = Mock(side_effect=fc_exc.FlowClassifierException) setattr(driver.obj, method_name, mock_method) manager = FlowClassifierDriverManager.make_test_instance([driver]) mocked_context = Mock() self.assertRaises(expected_exc, getattr(manager, method_name), mocked_context) def test_create_flow_classifier_precommit_called(self): self._test_method_called("create_flow_classifier_precommit") def test_create_flow_classifier_precommit_exception(self): self._test_method_exception("create_flow_classifier_precommit", 
fc_exc.FlowClassifierException) def test_create_flow_classifier_postcommit_called(self): self._test_method_called("create_flow_classifier_postcommit") def test_create_flow_classifier_postcommit_exception(self): self._test_method_exception("create_flow_classifier_postcommit") def test_update_flow_classifier_precommit_called(self): self._test_method_called("update_flow_classifier_precommit") def test_update_flow_classifier_precommit_exception(self): self._test_method_exception("update_flow_classifier_precommit") def test_update_flow_classifier_postcommit_called(self): self._test_method_called("update_flow_classifier_postcommit") def test_update_flow_classifier_postcommit_exception(self): self._test_method_exception("update_flow_classifier_postcommit") def test_delete_flow_classifier_called(self): self._test_method_called("delete_flow_classifier") def test_delete_flow_classifier_exception(self): self._test_method_exception("delete_flow_classifier") def test_delete_flow_classifier_precommit_called(self): self._test_method_called("delete_flow_classifier_precommit") def test_delete_flow_classifier_precommit_exception(self): self._test_method_exception("delete_flow_classifier_precommit") def test_delete_flow_classifier_postcommit_called(self): self._test_method_called("delete_flow_classifier_postcommit") def test_delete_flow_classifier_postcommit_exception(self): self._test_method_exception("delete_flow_classifier_postcommit") networking-sfc-10.0.0/networking_sfc/tests/unit/services/flowclassifier/__init__.py0000664000175000017500000000000013656750333030666 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/flowclassifier/drivers/0000775000175000017500000000000013656750461030247 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/flowclassifier/drivers/__init__.py0000664000175000017500000000000013656750333032344 0ustar 
zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/flowclassifier/drivers/ovs/0000775000175000017500000000000013656750461031056 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/flowclassifier/drivers/ovs/__init__.py0000664000175000017500000000000013656750333033153 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/flowclassifier/drivers/ovs/test_driver.py0000664000175000017500000001005613656750333033762 0ustar zuulzuul00000000000000# Copyright 2016 Huawei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import portbindings from neutron_lib import context from oslo_utils import importutils from neutron.api import extensions as api_ext from neutron.common import config from networking_sfc.db import flowclassifier_db as fdb from networking_sfc.extensions import flowclassifier from networking_sfc.services.flowclassifier.common import context as fc_ctx from networking_sfc.services.flowclassifier.common import exceptions as fc_exc from networking_sfc.services.flowclassifier.drivers.ovs import driver from networking_sfc.tests import base from networking_sfc.tests.unit.db import test_flowclassifier_db class OVSFlowClassifierDriverTestCase( test_flowclassifier_db.FlowClassifierDbPluginTestCaseBase, base.NeutronDbPluginV2TestCase ): resource_prefix_map = dict([ (k, flowclassifier.FLOW_CLASSIFIER_PREFIX) for k in flowclassifier.RESOURCE_ATTRIBUTE_MAP.keys() ]) def setUp(self): flowclassifier_plugin = ( test_flowclassifier_db.DB_FLOWCLASSIFIER_PLUGIN_CLASS) service_plugins = { flowclassifier.FLOW_CLASSIFIER_EXT: flowclassifier_plugin } fdb.FlowClassifierDbPlugin.supported_extension_aliases = [ flowclassifier.FLOW_CLASSIFIER_EXT] fdb.FlowClassifierDbPlugin.path_prefix = ( flowclassifier.FLOW_CLASSIFIER_PREFIX ) super(OVSFlowClassifierDriverTestCase, self).setUp( ext_mgr=None, plugin=None, service_plugins=service_plugins ) self.flowclassifier_plugin = importutils.import_object( flowclassifier_plugin) ext_mgr = api_ext.PluginAwareExtensionManager( test_flowclassifier_db.extensions_path, { flowclassifier.FLOW_CLASSIFIER_EXT: self.flowclassifier_plugin } ) app = config.load_paste_app('extensions_test_app') self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr) self.ctx = context.get_admin_context() self.driver = driver.OVSFlowClassifierDriver() self.driver.initialize() def tearDown(self): super(OVSFlowClassifierDriverTestCase, self).tearDown() def test_create_flow_classifier_precommit(self): with self.port( name='port1', 
device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_source_port': src_port['port']['id'] }) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) self.driver.create_flow_classifier_precommit(fc_context) def test_create_flow_classifier_precommit_no_logical_source_port(self): with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_source_port': None }) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) self.assertRaises( fc_exc.FlowClassifierBadRequest, self.driver.create_flow_classifier_precommit, fc_context) networking-sfc-10.0.0/networking_sfc/tests/unit/services/flowclassifier/test_plugin.py0000664000175000017500000003365113656750333031506 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock from networking_sfc.services.flowclassifier.common import context as fc_ctx from networking_sfc.services.flowclassifier.common import exceptions as fc_exc from networking_sfc.tests.unit.db import test_flowclassifier_db FLOWCLASSIFIER_PLUGIN_KLASS = ( "networking_sfc.services.flowclassifier." 
"plugin.FlowClassifierPlugin" ) class FlowClassifierPluginTestCase( test_flowclassifier_db.FlowClassifierDbPluginTestCase ): def setUp( self, core_plugin=None, flowclassifier_plugin=None, ext_mgr=None ): if not flowclassifier_plugin: flowclassifier_plugin = FLOWCLASSIFIER_PLUGIN_KLASS self.driver_manager_p = mock.patch( 'networking_sfc.services.flowclassifier.driver_manager.' 'FlowClassifierDriverManager' ) self.fake_driver_manager_class = self.driver_manager_p.start() self.fake_driver_manager = mock.Mock() self.fake_driver_manager_class.return_value = self.fake_driver_manager self.plugin_context = None self.plugin_context_precommit = None self.plugin_context_postcommit = None super(FlowClassifierPluginTestCase, self).setUp( core_plugin=core_plugin, flowclassifier_plugin=flowclassifier_plugin, ext_mgr=ext_mgr ) def _record_context(self, plugin_context): self.plugin_context = plugin_context def _record_context_precommit(self, plugin_context): self.plugin_context_precommit = plugin_context def _record_context_postcommit(self, plugin_context): self.plugin_context_postcommit = plugin_context def test_create_flow_classifier_driver_manager_called(self): self.fake_driver_manager.create_flow_classifier_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.create_flow_classifier_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'logical_source_port': port['port']['id'] }) as fc: driver_manager = self.fake_driver_manager (driver_manager.create_flow_classifier_precommit .assert_called_once_with(mock.ANY)) (driver_manager.create_flow_classifier_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context_precommit, fc_ctx.FlowClassifierContext) self.assertIsInstance( self.plugin_context_postcommit, fc_ctx.FlowClassifierContext) self.assertIn('flow_classifier', fc) self.assertEqual( 
self.plugin_context_precommit.current, fc['flow_classifier']) self.assertEqual( self.plugin_context_postcommit.current, fc['flow_classifier']) def test_create_flow_classifier_postcommit_driver_manager_exception(self): self.fake_driver_manager.create_flow_classifier_postcommit = mock.Mock( side_effect=fc_exc.FlowClassifierDriverError( method='create_flow_classifier_postcommit' ) ) with self.port( name='test1' ) as port: self._create_flow_classifier( self.fmt, {'logical_source_port': port['port']['id']}, expected_res_status=500) driver_manager = self.fake_driver_manager (driver_manager.create_flow_classifier_precommit .assert_called_once_with(mock.ANY)) (driver_manager.create_flow_classifier_postcommit .assert_called_once_with(mock.ANY)) (driver_manager.delete_flow_classifier .assert_called_once_with(mock.ANY)) (driver_manager.delete_flow_classifier_precommit .assert_called_once_with(mock.ANY)) (driver_manager.delete_flow_classifier_postcommit .assert_called_once_with(mock.ANY)) self._test_list_resources('flow_classifier', []) def test_create_flow_classifier_precommit_driver_manager_exception(self): self.fake_driver_manager.create_flow_classifier_precommit = mock.Mock( side_effect=fc_exc.FlowClassifierDriverError( method='create_flow_classifier_precommit' ) ) with self.port( name='test1' ) as port: self._test_list_resources('flow_classifier', []) self._create_flow_classifier( self.fmt, {'logical_source_port': port['port']['id']}, expected_res_status=500) self._test_list_resources('flow_classifier', []) driver_manager = self.fake_driver_manager (driver_manager.create_flow_classifier_precommit .assert_called_once_with(mock.ANY)) (driver_manager.create_flow_classifier_postcommit .assert_not_called()) driver_manager.delete_flow_classifier.assert_not_called() (driver_manager.delete_flow_classifier_precommit .assert_not_called()) (driver_manager.delete_flow_classifier_postcommit .assert_not_called()) self._test_list_resources('flow_classifier', []) def 
test_update_flow_classifier_driver_manager_called(self): self.fake_driver_manager.update_flow_classifier_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.update_flow_classifier_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_source_port': port['port']['id'] }) as fc: req = self.new_update_request( 'flow_classifiers', {'flow_classifier': {'name': 'test2'}}, fc['flow_classifier']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) driver_manager = self.fake_driver_manager (driver_manager.update_flow_classifier_precommit .assert_called_once_with(mock.ANY)) (driver_manager.update_flow_classifier_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context_precommit, fc_ctx.FlowClassifierContext) self.assertIsInstance(self.plugin_context_postcommit, fc_ctx.FlowClassifierContext) self.assertIn('flow_classifier', fc) self.assertIn('flow_classifier', res) self.assertEqual(self.plugin_context_precommit.current, res['flow_classifier']) self.assertEqual(self.plugin_context_postcommit.current, res['flow_classifier']) self.assertEqual(self.plugin_context_precommit.original, fc['flow_classifier']) self.assertEqual(self.plugin_context_postcommit.original, fc['flow_classifier']) def _test_update_flow_classifier_driver_manager_exception(self, updated): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_source_port': port['port']['id'] }) as fc: self.assertIn('flow_classifier', fc) original_flow_classifier = fc['flow_classifier'] req = self.new_update_request( 'flow_classifiers', {'flow_classifier': {'name': 'test2'}}, fc['flow_classifier']['id'] ) updated_flow_classifier = copy.copy(original_flow_classifier) if updated: updated_flow_classifier['name'] = 'test2' res = req.get_response(self.ext_api) 
self.assertEqual(500, res.status_int) driver_manager = self.fake_driver_manager (driver_manager.update_flow_classifier_precommit .assert_called_once_with(mock.ANY)) if updated: (driver_manager.update_flow_classifier_postcommit .assert_called_once_with(mock.ANY)) else: (driver_manager.update_flow_classifier_postcommit .assert_not_called()) res = self._list('flow_classifiers') self.assertIn('flow_classifiers', res) self.assertItemsEqual( res['flow_classifiers'], [updated_flow_classifier]) def test_update_flow_classifier_precommit_driver_manager_exception(self): self.fake_driver_manager.update_flow_classifier_precommit = mock.Mock( side_effect=fc_exc.FlowClassifierDriverError( method='update_flow_classifier_precommit' ) ) self._test_update_flow_classifier_driver_manager_exception(False) def test_update_flow_classifier_postcommit_driver_manager_exception(self): self.fake_driver_manager.update_flow_classifier_postcommit = mock.Mock( side_effect=fc_exc.FlowClassifierDriverError( method='update_flow_classifier_postcommit' ) ) self._test_update_flow_classifier_driver_manager_exception(True) def test_delete_flow_classifer_driver_manager_called(self): self.fake_driver_manager.delete_flow_classifier = mock.Mock( side_effect=self._record_context) self.fake_driver_manager.delete_flow_classifier_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.delete_flow_classifier_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port( name='test1' ) as port: with self.flow_classifier( flow_classifier={'logical_source_port': port['port']['id']}, do_delete=False ) as fc: req = self.new_delete_request( 'flow_classifiers', fc['flow_classifier']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(204, res.status_int) driver_manager = self.fake_driver_manager (driver_manager.delete_flow_classifier .assert_called_once_with(mock.ANY)) (driver_manager.delete_flow_classifier_precommit .assert_called_once_with(mock.ANY)) 
(driver_manager.delete_flow_classifier_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context, fc_ctx.FlowClassifierContext ) self.assertIsInstance( self.plugin_context_precommit, fc_ctx.FlowClassifierContext ) self.assertIsInstance(self.plugin_context_postcommit, fc_ctx.FlowClassifierContext) self.assertIn('flow_classifier', fc) self.assertEqual( self.plugin_context.current, fc['flow_classifier']) self.assertEqual(self.plugin_context_precommit.current, fc['flow_classifier']) self.assertEqual(self.plugin_context_postcommit.current, fc['flow_classifier']) def _test_delete_flow_classifier_driver_manager_exception(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_source_port': port['port']['id'] }, do_delete=False) as fc: req = self.new_delete_request( 'flow_classifiers', fc['flow_classifier']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(500, res.status_int) driver_manager = self.fake_driver_manager driver_manager.delete_flow_classifier.assert_called_once_with( mock.ANY ) self._test_list_resources('flow_classifier', [fc]) def test_delete_flow_classifier_driver_manager_exception(self): self.fake_driver_manager.delete_flow_classifier = mock.Mock( side_effect=fc_exc.FlowClassifierDriverError( method='delete_flow_classifier' ) ) self._test_delete_flow_classifier_driver_manager_exception() def test_delete_flow_classifier_precommit_driver_manager_exception(self): self.fake_driver_manager.delete_flow_classifier_precommit = mock.Mock( side_effect=fc_exc.FlowClassifierDriverError( method='delete_flow_classifier_precommit' ) ) self._test_delete_flow_classifier_driver_manager_exception() networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/0000775000175000017500000000000013656750461024330 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/test_driver_manager.py0000664000175000017500000002245213656750333030731 0ustar 
zuulzuul00000000000000# Copyright 2017 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mock import Mock from stevedore.extension import Extension from neutron.tests import base from networking_sfc.services.sfc.common import exceptions as sfc_exc from networking_sfc.services.sfc.driver_manager \ import SfcDriverManager class DriverManagerTestCase(base.BaseTestCase): def setUp(self): super(DriverManagerTestCase, self).setUp() def test_initialize_called(self): driver1 = Extension('mock_driver1', Mock(), None, Mock(native_bulk_support=True)) driver2 = Extension('mock_driver2', Mock(), None, Mock(native_bulk_support=True)) manager = SfcDriverManager.make_test_instance([driver1, driver2]) manager.initialize() driver1.obj.initialize.assert_called_once_with() driver2.obj.initialize.assert_called_once_with() def _test_method_called(self, method_name): driver1 = Extension('mock_driver1', Mock(), None, Mock(native_bulk_support=True)) driver2 = Extension('mock_driver2', Mock(), None, Mock(native_bulk_support=True)) manager = SfcDriverManager.make_test_instance([driver1, driver2]) mocked_context = Mock() getattr(manager, method_name)(mocked_context) getattr(driver1.obj, method_name).assert_called_once_with( mocked_context) getattr(driver2.obj, method_name).assert_called_once_with( mocked_context) def _test_method_exception(self, method_name, expected_exc=sfc_exc.SfcDriverError): driver = Extension('mock_driver', Mock(), None, Mock(native_bulk_support=True)) 
mock_method = Mock(side_effect=sfc_exc.SfcException) setattr(driver.obj, method_name, mock_method) manager = SfcDriverManager.make_test_instance([driver]) mocked_context = Mock() self.assertRaises(expected_exc, getattr(manager, method_name), mocked_context) def test_create_port_chain_precommit_called(self): self._test_method_called("create_port_chain_precommit") def test_create_port_chain_precommit_exception(self): self._test_method_exception("create_port_chain_precommit", sfc_exc.SfcException) def test_create_port_chain_postcommit_called(self): self._test_method_called("create_port_chain_postcommit") def test_create_port_chain_postcommit_exception(self): self._test_method_exception("create_port_chain_postcommit") def test_update_port_chain_precommit_called(self): self._test_method_called("update_port_chain_precommit") def test_update_port_chain_precommit_exception(self): self._test_method_exception("update_port_chain_precommit") def test_update_port_chain_postcommit_called(self): self._test_method_called("update_port_chain_postcommit") def test_update_port_chain_postcommit_exception(self): self._test_method_exception("update_port_chain_postcommit") def test_delete_port_chain_called(self): self._test_method_called("delete_port_chain") def test_delete_port_chain_exception(self): self._test_method_exception("delete_port_chain") def test_delete_port_chain_precommit_called(self): self._test_method_called("delete_port_chain_precommit") def test_delete_port_chain_precommit_exception(self): self._test_method_exception("delete_port_chain_precommit") def test_delete_port_chain_postcommit_called(self): self._test_method_called("delete_port_chain_postcommit") def test_delete_port_chain_postcommit_exception(self): self._test_method_exception("delete_port_chain_postcommit") def test_create_port_pair_group_precommit_called(self): self._test_method_called("create_port_pair_group_precommit") def test_create_port_pair_group_precommit_exception(self): 
self._test_method_exception("create_port_pair_group_precommit") def test_create_port_pair_group_postcommit_called(self): self._test_method_called("create_port_pair_group_postcommit") def test_create_port_pair_group_postcommit_exception(self): self._test_method_exception("create_port_pair_group_postcommit") def test_update_port_pair_group_precommit_called(self): self._test_method_called("update_port_pair_group_precommit") def test_update_port_pair_group_precommit_exception(self): self._test_method_exception("update_port_pair_group_precommit") def test_update_port_pair_group_postcommit_called(self): self._test_method_called("update_port_pair_group_postcommit") def test_update_port_pair_group_postcommit_exception(self): self._test_method_exception("update_port_pair_group_postcommit") def test_delete_port_pair_group_called(self): self._test_method_called("delete_port_pair_group") def test_delete_port_pair_group_exception(self): self._test_method_exception("delete_port_pair_group") def test_delete_port_pair_group_precommit_called(self): self._test_method_called("delete_port_pair_group_precommit") def test_delete_port_pair_group_precommit_exception(self): self._test_method_exception("delete_port_pair_group_precommit") def test_delete_port_pair_group_postcommit_called(self): self._test_method_called("delete_port_pair_group_postcommit") def test_delete_port_pair_group_postcommit_exception(self): self._test_method_exception("delete_port_pair_group_postcommit") def test_create_port_pair_precommit_called(self): self._test_method_called("create_port_pair_precommit") def test_create_port_pair_precommit_exception(self): self._test_method_exception("create_port_pair_precommit") def test_create_port_pair_postcommit_called(self): self._test_method_called("create_port_pair_postcommit") def test_create_port_pair_postcommit_exception(self): self._test_method_exception("create_port_pair_postcommit") def test_update_port_pair_precommit_called(self): 
self._test_method_called("update_port_pair_precommit") def test_update_port_pair_precommit_exception(self): self._test_method_exception("update_port_pair_precommit") def test_update_port_pair_postcommit_called(self): self._test_method_called("update_port_pair_postcommit") def test_update_port_pair_postcommit_exception(self): self._test_method_exception("update_port_pair_postcommit") def test_delete_port_pair_called(self): self._test_method_called("delete_port_pair") def test_delete_port_pair_exception(self): self._test_method_exception("delete_port_pair") def test_delete_port_pair_precommit_called(self): self._test_method_called("delete_port_pair_precommit") def test_delete_port_pair_precommit_exception(self): self._test_method_exception("delete_port_pair_precommit") def test_delete_port_pair_postcommit_called(self): self._test_method_called("delete_port_pair_postcommit") def test_delete_port_pair_postcommit_exception(self): self._test_method_exception("delete_port_pair_postcommit") def test_create_service_graph_precommit_called(self): self._test_method_called("create_service_graph_precommit") def test_create_service_graph_precommit_exception(self): self._test_method_exception("create_service_graph_precommit") def test_create_service_graph_postcommit_called(self): self._test_method_called("create_service_graph_postcommit") def test_create_service_graph_postcommit_exception(self): self._test_method_exception("create_service_graph_postcommit") def test_update_service_graph_precommit_called(self): self._test_method_called("update_service_graph_precommit") def test_update_service_graph_precommit_exception(self): self._test_method_exception("update_service_graph_precommit") def test_update_service_graph_postcommit_called(self): self._test_method_called("update_service_graph_postcommit") def test_update_service_graph_postcommit_exception(self): self._test_method_exception("update_service_graph_postcommit") def test_delete_service_graph_precommit_called(self): 
self._test_method_called("delete_service_graph_precommit") def test_delete_service_graph_precommit_exception(self): self._test_method_exception("delete_service_graph_precommit") def test_delete_service_graph_postcommit_called(self): self._test_method_called("delete_service_graph_postcommit") def test_delete_service_graph_postcommit_exception(self): self._test_method_exception("delete_service_graph_postcommit") networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/__init__.py0000664000175000017500000000000013656750333026425 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/drivers/0000775000175000017500000000000013656750461026006 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/drivers/__init__.py0000664000175000017500000000000013656750333030103 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/drivers/ovs/0000775000175000017500000000000013656750461026615 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/drivers/ovs/__init__.py0000664000175000017500000000000013656750333030712 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/drivers/ovs/test_driver.py0000664000175000017500000123044513656750333031530 0ustar zuulzuul00000000000000# Copyright 2015 Huawei. All rights reserved. # Copyright 2017 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
    def record_rpc(self, method, data):
        # Accumulate the payload under the RPC method name so tests can later
        # inspect exactly which flow-rule messages the driver "sent".
        # self.rpc_calls is a dict of lists, reset by init_rpc_calls().
        self.rpc_calls[method].append(data)
spawn(self, function, *args, **kwargs): self.threads.append(self.backup_spawn(function, *args, **kwargs)) def wait(self): for thread in self.threads: thread.wait() def get_endpoint_by_host(self, host): ip_address = self.host_endpoint_mapping.get(host) return {'host': host, 'ip_address': ip_address} def init_rpc_calls(self): self.rpc_calls = { 'update_flow_rules': [], 'delete_flow_rules': [], 'update_src_node_flow_rules': [], 'delete_src_node_flow_rules': [] } def setUp(self): sfc_plugin = test_sfc_db.DB_SFC_PLUGIN_CLASS flowclassifier_plugin = ( test_flowclassifier_db.DB_FLOWCLASSIFIER_PLUGIN_CLASS) service_plugins = { sfc.SFC_EXT: sfc_plugin, flowclassifier.FLOW_CLASSIFIER_EXT: flowclassifier_plugin } sfc_db.SfcDbPlugin.supported_extension_aliases = [ sfc.SFC_EXT, servicegraph.SG_EXT, tap.TAP_EXT] sfc_db.SfcDbPlugin.path_prefix = sfc.SFC_PREFIX fdb.FlowClassifierDbPlugin.supported_extension_aliases = [ flowclassifier.FLOW_CLASSIFIER_EXT] fdb.FlowClassifierDbPlugin.path_prefix = ( flowclassifier.FLOW_CLASSIFIER_PREFIX ) super(OVSSfcDriverTestCase, self).setUp( ext_mgr=None, plugin=None, service_plugins=service_plugins ) self.sfc_plugin = importutils.import_object(sfc_plugin) self.flowclassifier_plugin = importutils.import_object( flowclassifier_plugin) ext_mgr = api_ext.PluginAwareExtensionManager.get_instance() app = config.load_paste_app('extensions_test_app') self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr) self.init_rpc_calls() self.hostname = 'testhost' self.ctx = context.get_admin_context() self.backup_notifier_creator = rpc.SfcAgentRpcClient self.mocked_notifier = mock.Mock() self.mocked_notifier.ask_agent_to_update_flow_rules = mock.Mock( side_effect=self.ask_agent_to_update_flow_rules ) self.mocked_notifier.ask_agent_to_delete_flow_rules = mock.Mock( side_effect=self.ask_agent_to_delete_flow_rules ) self.mocked_notifier.ask_agent_to_delete_src_node_flow_rules = ( mock.Mock( side_effect=self.ask_agent_to_delete_src_node_flow_rules ) ) 
self.mocked_notifier.ask_agent_to_update_src_node_flow_rules = ( mock.Mock( side_effect=self.ask_agent_to_update_src_node_flow_rules ) ) rpc.SfcAgentRpcClient = mock.Mock( return_value=self.mocked_notifier) self.backup_conn_creator = n_rpc.Connection n_rpc.Connection = mock.Mock() n_rpc.Connection.return_value = mock.Mock() self.threads = [] self.backup_spawn = greenthread.spawn greenthread.spawn = mock.Mock( side_effect=self.spawn) self.host_endpoint_mapping = {} self.backup_get_endpoint_by_host = ( type_vxlan.VxlanTypeDriver.get_endpoint_by_host) type_vxlan.VxlanTypeDriver.get_endpoint_by_host = mock.Mock( side_effect=self.get_endpoint_by_host) self.driver = driver.OVSSfcDriver() self.driver.initialize() def tearDown(self): rpc.SfcAgentRpcClient = self.backup_notifier_creator n_rpc.Connection = self.backup_conn_creator greenthread.spawn = self.backup_spawn type_vxlan.VxlanTypeDriver.get_endpoint_by_host = ( self.backup_get_endpoint_by_host) self.init_rpc_calls() super(OVSSfcDriverTestCase, self).tearDown() def map_flow_rules(self, flow_rules, *args): flow_rule_dict = {} for arg in args: if arg: flow_rules = flow_rules + arg for flow_rule in flow_rules: ingress = flow_rule['ingress'] or '' egress = flow_rule['egress'] or '' key = self.build_ingress_egress( flow_rule['portchain_id'], ingress, egress) if key in flow_rule_dict: flow_rule_by_key = flow_rule_dict[key] for flow_key, flow_value in flow_rule.items(): if flow_key not in flow_rule_by_key: flow_rule_by_key[flow_key] = flow_value elif isinstance(flow_value, list): flow_rule_item = flow_rule_by_key[flow_key] for flow_item in flow_value: if flow_item not in flow_rule_item: flow_rule_item.append(flow_item) else: flow_rule_by_key[flow_key] = flow_value else: flow_rule_dict[key] = flow_rule return flow_rule_dict def build_ingress_egress(self, pc_id, ingress, egress): return '%s:%s:%s' % (pc_id[:8] or '', ingress or '', egress or '') def build_ingress_egress_from_pp(self, pc_id, pp): # pp must be a dict of Port 
Pairs' attributes return '%s:%s:%s' % (pc_id[:8] or '', pp.get( 'ingress') or '', pp.get('egress') or '') def next_hops_info(self, next_hops): info = {} if not next_hops: return info for next_hop in next_hops: if next_hop['in_mac_address'] is None: info[next_hop['mac_address']] = next_hop['local_endpoint'] else: info[next_hop['in_mac_address']] = next_hop['local_endpoint'] return info def test_create_port_chain(self): with self.port_pair_group(port_pair_group={ 'name': 'test1', }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() self.assertEqual(self.rpc_calls['update_flow_rules'], []) def test_create_port_chain_with_port_pairs(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='port2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as dst_port: self.host_endpoint_mapping = { 'test': '10.0.0.1', } with self.port_pair(port_pair={ 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }) as pp: pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp['port_pair'] ) self.driver.create_port_pair(pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp['port_pair']['id']] }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, 
pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], pp['port_pair']['ingress'], pp['port_pair']['egress']) self.assertEqual( set(update_flow_rules.keys()), {flow1}) self.assertEqual( update_flow_rules[flow1]['add_fcs'], []) self.assertEqual( update_flow_rules[flow1]['del_fcs'], []) self.assertEqual( update_flow_rules[flow1]['node_type'], 'sf_node') self.assertIsNone( update_flow_rules[flow1].get('next_hops') ) self.assertIsNone( update_flow_rules[flow1]['next_group_id'] ) def _test_create_port_chain_with_pp_fc_and_no_sfc_proxy(self, correlation): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress, self.port( name='egress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress: self.host_endpoint_mapping = { 'test': '10.0.0.1', } with self.flow_classifier(flow_classifier={ 'source_port_range_min': 100, 'source_port_range_max': 200, 'destination_port_range_min': 300, 'destination_port_range_max': 400, 'ethertype': 'IPv4', 'source_ip_prefix': '10.100.0.0/16', 'destination_ip_prefix': '10.200.0.0/16', 'l7_parameters': {}, 'protocol': 'tcp', 'logical_source_port': src_port['port']['id'] }) as fc: with self.port_pair(port_pair={ 'ingress': ingress['port']['id'], 'egress': egress['port']['id'], 'service_function_parameters': {'correlation': correlation} }) as pp: pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp['port_pair'] ) self.driver.create_port_pair(pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp['port_pair']['id']] }) as pg: pg_context = 
sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [fc['flow_classifier']['id']], 'chain_parameters': {'correlation': correlation} }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) # flow1 - src_node flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) # flow2 - sf_node flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], egress['port']['id'] ) self.assertEqual( set(update_flow_rules.keys()), set([flow1, flow2])) add_fcs = update_flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.100.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, {ingress['port']['mac_address']: '10.0.0.1'}) self.assertIsNotNone( update_flow_rules[flow1]['next_group_id'] ) self.assertEqual( update_flow_rules[flow1]['node_type'], 'src_node') add_fcs = update_flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.100.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, add_fcs[0]) next_hops = self.next_hops_info( 
    def test_create_port_chain_with_pp_fc_and_no_sfc_proxy_mpls(self):
        # MPLS-correlation variant of the shared no-SFC-proxy scenario.
        self._test_create_port_chain_with_pp_fc_and_no_sfc_proxy('mpls')
sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, fc['flow_classifier']['logical_source_port']) self.assertEqual( set(update_flow_rules.keys()), {flow1}) self.assertEqual( len(update_flow_rules[flow1]['add_fcs']), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': u'tcp', 'source_ip_prefix': u'10.100.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, update_flow_rules[flow1]['add_fcs'][0]) self.assertEqual( update_flow_rules[flow1]['del_fcs'], []) self.assertEqual( update_flow_rules[flow1]['node_type'], 'src_node') self.assertIsNone( update_flow_rules[flow1].get('next_hops') ) self.assertIsNotNone( update_flow_rules[flow1]['next_group_id'] ) def test_create_port_chain_with_flow_classifiers_port_pairs(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress, self.port( name='egress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress: self.host_endpoint_mapping = { 'test': '10.0.0.1', } with self.flow_classifier(flow_classifier={ 'source_port_range_min': 100, 'source_port_range_max': 200, 'destination_port_range_min': 300, 'destination_port_range_max': 400, 'ethertype': 'IPv4', 'source_ip_prefix': '10.100.0.0/16', 'destination_ip_prefix': '10.200.0.0/16', 'l7_parameters': {}, 'protocol': 'tcp', 'logical_source_port': src_port['port']['id'] }) as fc: with 
self.port_pair(port_pair={ 'ingress': ingress['port']['id'], 'egress': egress['port']['id'] }) as pp: pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp['port_pair'] ) self.driver.create_port_pair(pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp['port_pair']['id']] }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [fc['flow_classifier']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], egress['port']['id'] ) self.assertEqual( set(update_flow_rules.keys()), {flow1, flow2}) add_fcs = update_flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.100.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, {ingress['port']['mac_address']: '10.0.0.1'}) self.assertIsNotNone( update_flow_rules[flow1]['next_group_id'] ) self.assertEqual( update_flow_rules[flow1]['node_type'], 'src_node') add_fcs = update_flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 
300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.100.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, {}) self.assertIsNone( update_flow_rules[flow2]['next_group_id'] ) self.assertEqual( update_flow_rules[flow2]['node_type'], 'sf_node') def test_create_port_chain_with_fc_ppg_n_tuple_mapping(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress, self.port( name='egress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress: self.host_endpoint_mapping = { 'test': '10.0.0.1', } with self.flow_classifier(flow_classifier={ 'source_port_range_min': 100, 'source_port_range_max': 200, 'destination_port_range_min': 300, 'destination_port_range_max': 400, 'ethertype': 'IPv4', 'source_ip_prefix': '10.100.0.0/16', 'destination_ip_prefix': '10.200.0.0/16', 'l7_parameters': {}, 'protocol': 'tcp', 'logical_source_port': src_port['port']['id'] }) as fc: with self.port_pair(port_pair={ 'ingress': ingress['port']['id'], 'egress': egress['port']['id'] }) as pp: pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp['port_pair'] ) self.driver.create_port_pair(pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp['port_pair']['id']], 'port_pair_group_parameters': { 'ppg_n_tuple_mapping': { 'ingress_n_tuple': {}, 'egress_n_tuple': { 'source_ip_prefix': '10.300.0.10/16'} } } }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with 
self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [fc['flow_classifier']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], egress['port']['id'] ) self.assertEqual( set(update_flow_rules.keys()), {flow1, flow2}) add_fcs = update_flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.100.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, {ingress['port']['mac_address']: '10.0.0.1'}) self.assertIsNotNone( update_flow_rules[flow1]['next_group_id'] ) self.assertEqual( update_flow_rules[flow1]['node_type'], 'src_node') add_fcs = update_flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.300.0.10/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, {}) self.assertIsNone( update_flow_rules[flow2]['next_group_id'] ) self.assertEqual( update_flow_rules[flow2]['node_type'], 'sf_node') def 
test_create_port_chain_multi_port_groups_port_pairs(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress1, self.port( name='egress1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress1, self.port( name='ingress2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress2, self.port( name='egress2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress2, self.port( name='ingress3', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress3, self.port( name='egress3', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress3: self.host_endpoint_mapping = { 'test': '10.0.0.1', } with self.flow_classifier(flow_classifier={ 'logical_source_port': src_port['port']['id'] }) as fc: with self.port_pair(port_pair={ 'ingress': ingress1['port']['id'], 'egress': egress1['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': ingress2['port']['id'], 'egress': egress2['port']['id'] }) as pp2, self.port_pair(port_pair={ 'ingress': ingress3['port']['id'], 'egress': egress3['port']['id'] }) as pp3: pp1_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp1['port_pair'] ) self.driver.create_port_pair(pp1_context) pp2_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp2['port_pair'] ) self.driver.create_port_pair(pp2_context) pp3_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp3['port_pair'] ) self.driver.create_port_pair(pp3_context) with 
self.port_pair_group(port_pair_group={ 'port_pairs': [pp1['port_pair']['id']] }) as pg1, self.port_pair_group(port_pair_group={ 'port_pairs': [pp2['port_pair']['id']] }) as pg2, self.port_pair_group(port_pair_group={ 'port_pairs': [pp3['port_pair']['id']] }) as pg3: pg1_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg1['port_pair_group'] ) self.driver.create_port_pair_group(pg1_context) pg2_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg2['port_pair_group'] ) self.driver.create_port_pair_group(pg2_context) pg3_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg3['port_pair_group'] ) self.driver.create_port_pair_group(pg3_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'], pg3['port_pair_group']['id'] ], 'flow_classifiers': [fc['flow_classifier']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress1['port']['id'], egress1['port']['id']) flow3 = self.build_ingress_egress( pc['port_chain']['id'], ingress2['port']['id'], egress2['port']['id']) flow4 = self.build_ingress_egress( pc['port_chain']['id'], ingress3['port']['id'], egress3['port']['id']) self.assertEqual( set(update_flow_rules.keys()), {flow1, flow2, flow3, flow4}) add_fcs = update_flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) ip_src = ( src_port['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 
'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow1].get('next_hops') ) self.assertEqual( next_hops, { ingress1['port']['mac_address']: '10.0.0.1' } ) self.assertIsNotNone( update_flow_rules[flow1]['next_group_id'] ) self.assertEqual( update_flow_rules[flow1]['node_type'], 'src_node') add_fcs = update_flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, {ingress2['port']['mac_address']: '10.0.0.1'}) self.assertIsNotNone( update_flow_rules[flow2]['next_group_id'] ) self.assertEqual( update_flow_rules[flow2]['node_type'], 'sf_node') add_fcs = update_flow_rules[flow3]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow3].get('next_hops')) self.assertEqual( next_hops, {ingress3['port']['mac_address']: '10.0.0.1'}) self.assertIsNotNone( update_flow_rules[flow3]['next_group_id'] ) self.assertEqual( update_flow_rules[flow3]['node_type'], 'sf_node') add_fcs = update_flow_rules[flow4]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 
'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow4].get('next_hops')) self.assertEqual( next_hops, {}) self.assertIsNone( update_flow_rules[flow4]['next_group_id'] ) self.assertEqual( update_flow_rules[flow4]['node_type'], 'sf_node') def test_create_port_chain_port_groups_multi_port_pairs(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress1, self.port( name='egress1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress1, self.port( name='ingress2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress2, self.port( name='egress2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress2, self.port( name='ingress3', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress3, self.port( name='egress3', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress3: self.host_endpoint_mapping = { 'test': '10.0.0.1' } with self.flow_classifier(flow_classifier={ 'logical_source_port': src_port['port']['id'] }) as fc: with self.port_pair(port_pair={ 'ingress': ingress1['port']['id'], 'egress': egress1['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': ingress2['port']['id'], 'egress': egress2['port']['id'] }) as pp2, self.port_pair(port_pair={ 'ingress': ingress3['port']['id'], 'egress': egress3['port']['id'] }) as pp3: pp1_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, 
pp1['port_pair'] ) self.driver.create_port_pair(pp1_context) pp2_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp2['port_pair'] ) self.driver.create_port_pair(pp2_context) pp3_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp3['port_pair'] ) self.driver.create_port_pair(pp3_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [ pp1['port_pair']['id'], pp2['port_pair']['id'], pp3['port_pair']['id'] ] }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [ pg['port_pair_group']['id'] ], 'flow_classifiers': [fc['flow_classifier']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress1['port']['id'], egress1['port']['id']) flow3 = self.build_ingress_egress( pc['port_chain']['id'], ingress2['port']['id'], egress2['port']['id']) flow4 = self.build_ingress_egress( pc['port_chain']['id'], ingress3['port']['id'], egress3['port']['id']) self.assertEqual( set(update_flow_rules.keys()), {flow1, flow2, flow3, flow4}) add_fcs = update_flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) ip_src = ( src_port['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow1].get('next_hops') ) 
self.assertEqual(next_hops, { ingress1['port']['mac_address']: '10.0.0.1', ingress2['port']['mac_address']: '10.0.0.1', ingress3['port']['mac_address']: '10.0.0.1' }) self.assertIsNotNone( update_flow_rules[flow1]['next_group_id'] ) self.assertEqual( update_flow_rules[flow1]['node_type'], 'src_node') add_fcs = update_flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, {}) self.assertIsNone( update_flow_rules[flow2]['next_group_id'] ) self.assertEqual( update_flow_rules[flow2]['node_type'], 'sf_node') add_fcs = update_flow_rules[flow3]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow3].get('next_hops')) self.assertEqual( next_hops, {}) self.assertIsNone( update_flow_rules[flow3]['next_group_id'] ) self.assertEqual( update_flow_rules[flow3]['node_type'], 'sf_node') add_fcs = update_flow_rules[flow4]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow4].get('next_hops')) 
    def test_create_port_chain_multi_port_groups_multi_port_pairs(self):
        """Chain two port-pair-groups, each containing two port pairs.

        Expects five flow rules: a 'src_node' rule on the logical source
        port that load-balances across group 1's two ingress ports, two
        group-1 'sf_node' rules whose next hops are group 2's ingress
        ports, and two group-2 tail 'sf_node' rules with no next hops.
        """
        with self.port(
            name='port1',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as src_port, self.port(
            name='ingress1',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as ingress1, self.port(
            name='egress1',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as egress1, self.port(
            name='ingress2',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as ingress2, self.port(
            name='egress2',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as egress2, self.port(
            name='ingress3',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as ingress3, self.port(
            name='egress3',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as egress3, self.port(
            name='ingress4',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as ingress4, self.port(
            name='egress4',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as egress4:
            # Every port is bound to host 'test', so a single
            # host -> VTEP IP mapping covers all hops.
            self.host_endpoint_mapping = {
                'test': '10.0.0.1'
            }
            with self.flow_classifier(flow_classifier={
                'logical_source_port': src_port['port']['id']
            }) as fc:
                with self.port_pair(port_pair={
                    'ingress': ingress1['port']['id'],
                    'egress': egress1['port']['id']
                }) as pp1, self.port_pair(port_pair={
                    'ingress': ingress2['port']['id'],
                    'egress': egress2['port']['id']
                }) as pp2, self.port_pair(port_pair={
                    'ingress': ingress3['port']['id'],
                    'egress': egress3['port']['id']
                }) as pp3, self.port_pair(port_pair={
                    'ingress': ingress4['port']['id'],
                    'egress': egress4['port']['id']
                }) as pp4:
                    pp1_context = sfc_ctx.PortPairContext(
                        self.sfc_plugin, self.ctx,
                        pp1['port_pair']
                    )
                    self.driver.create_port_pair(pp1_context)
                    pp2_context = sfc_ctx.PortPairContext(
                        self.sfc_plugin, self.ctx,
                        pp2['port_pair']
                    )
                    self.driver.create_port_pair(pp2_context)
                    pp3_context = sfc_ctx.PortPairContext(
                        self.sfc_plugin, self.ctx,
                        pp3['port_pair']
                    )
                    self.driver.create_port_pair(pp3_context)
                    pp4_context = sfc_ctx.PortPairContext(
                        self.sfc_plugin, self.ctx,
                        pp4['port_pair']
                    )
                    self.driver.create_port_pair(pp4_context)
                    # pp1/pp2 form hop 1, pp3/pp4 form hop 2 of the chain.
                    with self.port_pair_group(port_pair_group={
                        'port_pairs': [
                            pp1['port_pair']['id'],
                            pp2['port_pair']['id']
                        ]
                    }) as pg1, self.port_pair_group(port_pair_group={
                        'port_pairs': [
                            pp3['port_pair']['id'],
                            pp4['port_pair']['id']
                        ]
                    }) as pg2:
                        pg1_context = sfc_ctx.PortPairGroupContext(
                            self.sfc_plugin, self.ctx,
                            pg1['port_pair_group']
                        )
                        self.driver.create_port_pair_group(pg1_context)
                        pg2_context = sfc_ctx.PortPairGroupContext(
                            self.sfc_plugin, self.ctx,
                            pg2['port_pair_group']
                        )
                        self.driver.create_port_pair_group(pg2_context)
                        with self.port_chain(port_chain={
                            'name': 'test1',
                            'port_pair_groups': [
                                pg1['port_pair_group']['id'],
                                pg2['port_pair_group']['id']
                            ],
                            'flow_classifiers': [fc['flow_classifier']['id']]
                        }) as pc:
                            pc_context = sfc_ctx.PortChainContext(
                                self.sfc_plugin, self.ctx,
                                pc['port_chain']
                            )
                            self.driver.create_port_chain(pc_context)
                            self.wait()
                            update_flow_rules = self.map_flow_rules(
                                self.rpc_calls['update_flow_rules'])
                            # One flow rule per hop: the source node plus
                            # one per port pair.
                            flow1 = self.build_ingress_egress(
                                pc['port_chain']['id'],
                                None, src_port['port']['id'])
                            flow2 = self.build_ingress_egress(
                                pc['port_chain']['id'],
                                ingress1['port']['id'],
                                egress1['port']['id'])
                            flow3 = self.build_ingress_egress(
                                pc['port_chain']['id'],
                                ingress2['port']['id'],
                                egress2['port']['id'])
                            flow4 = self.build_ingress_egress(
                                pc['port_chain']['id'],
                                ingress3['port']['id'],
                                egress3['port']['id'])
                            flow5 = self.build_ingress_egress(
                                pc['port_chain']['id'],
                                ingress4['port']['id'],
                                egress4['port']['id'])
                            self.assertEqual(
                                set(update_flow_rules.keys()),
                                {flow1, flow2, flow3, flow4, flow5})
                            add_fcs = update_flow_rules[flow1]['add_fcs']
                            self.assertEqual(len(add_fcs), 1)
                            ip_src = (
                                src_port['port']['fixed_ips'][0]['ip_address']
                            )
                            # NOTE(review): assertDictContainsSubset was
                            # removed from unittest in Python 3.12 —
                            # presumably the test base class provides a
                            # replacement; verify.
                            self.assertDictContainsSubset({
                                'destination_ip_prefix': None,
                                'destination_port_range_max': None,
                                'destination_port_range_min': None,
                                'ethertype': 'IPv4',
                                'l7_parameters': {},
                                'protocol': None,
                                'source_ip_prefix': ip_src,
                                'source_port_range_max': None,
                                'source_port_range_min': None
                            }, add_fcs[0])
                            next_hops = self.next_hops_info(
                                update_flow_rules[flow1].get('next_hops')
                            )
                            # The src node load-balances over group 1's
                            # ingress ports.
                            self.assertEqual(next_hops, {
                                ingress1['port']['mac_address']: '10.0.0.1',
                                ingress2['port']['mac_address']: '10.0.0.1',
                            })
                            self.assertIsNotNone(
                                update_flow_rules[flow1]['next_group_id']
                            )
                            self.assertEqual(
                                update_flow_rules[flow1]['node_type'],
                                'src_node')
                            add_fcs = update_flow_rules[flow2]['add_fcs']
                            self.assertEqual(len(add_fcs), 1)
                            self.assertDictContainsSubset({
                                'destination_ip_prefix': None,
                                'destination_port_range_max': None,
                                'destination_port_range_min': None,
                                'ethertype': 'IPv4',
                                'l7_parameters': {},
                                'protocol': None,
                                'source_ip_prefix': ip_src,
                                'source_port_range_max': None,
                                'source_port_range_min': None
                            }, add_fcs[0])
                            next_hops = self.next_hops_info(
                                update_flow_rules[flow2].get('next_hops'))
                            # Group 1 SF nodes forward to group 2's
                            # ingress ports.
                            self.assertEqual(next_hops, {
                                ingress3['port']['mac_address']: '10.0.0.1',
                                ingress4['port']['mac_address']: '10.0.0.1'
                            })
                            self.assertIsNotNone(
                                update_flow_rules[flow2]['next_group_id']
                            )
                            self.assertEqual(
                                update_flow_rules[flow2]['node_type'],
                                'sf_node')
                            add_fcs = update_flow_rules[flow3]['add_fcs']
                            self.assertEqual(len(add_fcs), 1)
                            self.assertDictContainsSubset({
                                'destination_ip_prefix': None,
                                'destination_port_range_max': None,
                                'destination_port_range_min': None,
                                'ethertype': 'IPv4',
                                'l7_parameters': {},
                                'protocol': None,
                                'source_ip_prefix': ip_src,
                                'source_port_range_max': None,
                                'source_port_range_min': None
                            }, add_fcs[0])
                            next_hops = self.next_hops_info(
                                update_flow_rules[flow3].get('next_hops'))
                            self.assertEqual(next_hops, {
                                ingress3['port']['mac_address']: '10.0.0.1',
                                ingress4['port']['mac_address']: '10.0.0.1'
                            })
                            self.assertIsNotNone(
                                update_flow_rules[flow3]['next_group_id']
                            )
                            self.assertEqual(
                                update_flow_rules[flow3]['node_type'],
                                'sf_node')
                            add_fcs = update_flow_rules[flow4]['add_fcs']
                            self.assertEqual(len(add_fcs), 1)
                            self.assertDictContainsSubset({
                                'destination_ip_prefix': None,
                                'destination_port_range_max': None,
                                'destination_port_range_min': None,
                                'ethertype': 'IPv4',
                                'l7_parameters': {},
                                'protocol': None,
                                'source_ip_prefix': ip_src,
                                'source_port_range_max': None,
                                'source_port_range_min': None
                            }, add_fcs[0])
                            next_hops = self.next_hops_info(
                                update_flow_rules[flow4].get('next_hops'))
                            # Last group in the chain: no further hops.
                            self.assertEqual(
                                next_hops, {})
                            self.assertIsNone(
                                update_flow_rules[flow4]['next_group_id']
                            )
                            self.assertEqual(
                                update_flow_rules[flow4]['node_type'],
                                'sf_node')
                            add_fcs = update_flow_rules[flow5]['add_fcs']
                            self.assertEqual(len(add_fcs), 1)
                            self.assertDictContainsSubset({
                                'destination_ip_prefix': None,
                                'destination_port_range_max': None,
                                'destination_port_range_min': None,
                                'ethertype': 'IPv4',
                                'l7_parameters': {},
                                'protocol': None,
                                'source_ip_prefix': ip_src,
                                'source_port_range_max': None,
                                'source_port_range_min': None
                            }, add_fcs[0])
                            next_hops = self.next_hops_info(
                                update_flow_rules[flow5].get('next_hops'))
                            self.assertEqual(
                                next_hops, {})
                            self.assertIsNone(
                                update_flow_rules[flow5]['next_group_id']
                            )
                            self.assertEqual(
                                update_flow_rules[flow5]['node_type'],
                                'sf_node')
    def test_create_port_chain_with_multi_flow_classifiers_port_pairs(self):
        """Chain with two flow classifiers and a single port pair.

        Each classifier yields its own 'src_node' flow rule on its logical
        source port; the single 'sf_node' rule carries both classifiers
        in add_fcs.
        """
        with self.port(
            name='port1',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as src_port1, self.port(
            name='port3',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as src_port2, self.port(
            name='ingress',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as ingress, self.port(
            name='egress',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as egress:
            self.host_endpoint_mapping = {
                'test': '10.0.0.1'
            }
            # One classifier per logical source port.
            with self.flow_classifier(flow_classifier={
                'logical_source_port': src_port1['port']['id']
            }) as fc1, self.flow_classifier(flow_classifier={
                'logical_source_port': src_port2['port']['id']
            }) as fc2:
                with self.port_pair(port_pair={
                    'ingress': ingress['port']['id'],
                    'egress': egress['port']['id']
                }) as pp:
                    pp_context = sfc_ctx.PortPairContext(
                        self.sfc_plugin, self.ctx,
                        pp['port_pair']
                    )
                    self.driver.create_port_pair(pp_context)
                    with self.port_pair_group(port_pair_group={
                        'port_pairs': [pp['port_pair']['id']]
                    }) as pg:
                        pg_context = sfc_ctx.PortPairGroupContext(
                            self.sfc_plugin, self.ctx,
                            pg['port_pair_group']
                        )
                        self.driver.create_port_pair_group(pg_context)
                        with self.port_chain(port_chain={
                            'name': 'test1',
                            'port_pair_groups': [pg['port_pair_group']['id']],
                            'flow_classifiers': [
                                fc1['flow_classifier']['id'],
                                fc2['flow_classifier']['id']
                            ]
                        }) as pc:
                            pc_context = sfc_ctx.PortChainContext(
                                self.sfc_plugin, self.ctx,
                                pc['port_chain']
                            )
                            self.driver.create_port_chain(pc_context)
                            self.wait()
                            update_flow_rules = self.map_flow_rules(
                                self.rpc_calls['update_flow_rules'])
                            # One src rule per classifier plus one SF rule.
                            flow1 = self.build_ingress_egress(
                                pc['port_chain']['id'],
                                None, src_port1['port']['id'])
                            flow2 = self.build_ingress_egress(
                                pc['port_chain']['id'],
                                None, src_port2['port']['id'])
                            flow3 = self.build_ingress_egress(
                                pc['port_chain']['id'],
                                ingress['port']['id'],
                                egress['port']['id'])
                            self.assertEqual(
                                set(update_flow_rules.keys()),
                                {flow1, flow2, flow3})
                            add_fcs = update_flow_rules[flow1]['add_fcs']
                            self.assertEqual(len(add_fcs), 1)
                            ip_src1 = (
                                src_port1['port']['fixed_ips'][0]['ip_address']
                            )
                            self.assertDictContainsSubset({
                                'destination_ip_prefix': None,
                                'destination_port_range_max': None,
                                'destination_port_range_min': None,
                                'ethertype': 'IPv4',
                                'l7_parameters': {},
                                'protocol': None,
                                'source_ip_prefix': ip_src1,
                                'source_port_range_max': None,
                                'source_port_range_min': None
                            }, add_fcs[0])
                            next_hops = self.next_hops_info(
                                update_flow_rules[flow1].get('next_hops'))
                            self.assertEqual(
                                next_hops, {
                                    ingress['port']['mac_address']: '10.0.0.1'
                                }
                            )
                            self.assertEqual(
                                update_flow_rules[flow1]['node_type'],
                                'src_node')
                            add_fcs = update_flow_rules[flow2]['add_fcs']
                            self.assertEqual(len(add_fcs), 1)
                            ip_src2 = (
                                src_port2['port']['fixed_ips'][0]['ip_address']
                            )
                            self.assertDictContainsSubset({
                                'destination_ip_prefix': None,
                                'destination_port_range_max': None,
                                'destination_port_range_min': None,
                                'ethertype': 'IPv4',
                                'l7_parameters': {},
                                'protocol': None,
                                'source_ip_prefix': ip_src2,
                                'source_port_range_max': None,
                                'source_port_range_min': None
                            }, add_fcs[0])
                            next_hops = self.next_hops_info(
                                update_flow_rules[flow2].get('next_hops'))
                            self.assertEqual(
                                next_hops, {
                                    ingress['port']['mac_address']: '10.0.0.1'
                                }
                            )
                            self.assertEqual(
                                update_flow_rules[flow2]['node_type'],
                                'src_node')
                            add_fcs = update_flow_rules[flow3]['add_fcs']
                            self.assertEqual(len(add_fcs), 2)
                            # The SF rule carries both classifiers; their
                            # order is not guaranteed, so match them by
                            # source_ip_prefix.
                            self._assert_flow_classifiers_match_subsets(
                                add_fcs, [{
                                    'destination_ip_prefix': None,
                                    'destination_port_range_max': None,
                                    'destination_port_range_min': None,
                                    'ethertype': 'IPv4',
                                    'l7_parameters': {},
                                    'protocol': None,
                                    'source_ip_prefix': ip_src1,
                                    'source_port_range_max': None,
                                    'source_port_range_min': None
                                }, {
                                    'destination_ip_prefix': None,
                                    'destination_port_range_max': None,
                                    'destination_port_range_min': None,
                                    'ethertype': 'IPv4',
                                    'l7_parameters': {},
                                    'protocol': None,
                                    'source_ip_prefix': ip_src2,
                                    'source_port_range_max': None,
                                    'source_port_range_min': None
                                }],
                                'source_ip_prefix')
                            next_hops = self.next_hops_info(
                                update_flow_rules[flow3].get('next_hops'))
                            self.assertEqual(
                                next_hops, {
                                }
                            )
                            self.assertEqual(
                                update_flow_rules[flow3]['node_type'],
                                'sf_node')
    def test_delete_port_chain(self):
        """Deleting a chain with an empty port-pair-group emits no flows.

        The group has no port pairs and the chain has no classifiers, so
        creation produced no flow rules and the delete must not issue any
        delete_flow_rules RPCs.
        """
        with self.port_pair_group(port_pair_group={
            'name': 'test1',
        }) as pg:
            pg_context = sfc_ctx.PortPairGroupContext(
                self.sfc_plugin, self.ctx,
                pg['port_pair_group']
            )
            self.driver.create_port_pair_group(pg_context)
            with self.port_chain(port_chain={
                'name': 'test1',
                'port_pair_groups': [pg['port_pair_group']['id']]
            }) as pc:
                pc_context = sfc_ctx.PortChainContext(
                    self.sfc_plugin, self.ctx,
                    pc['port_chain']
                )
                self.driver.create_port_chain(pc_context)
                self.wait()
                self.driver.delete_port_chain(pc_context)
                self.wait()
                # Nothing was programmed, so nothing should be deleted.
                self.assertEqual(
                    self.rpc_calls['delete_flow_rules'], [])
    def test_delete_port_chain_with_port_pairs(self):
        """Delete a chain that has one port pair and no flow classifier.

        Only the 'sf_node' flow rule is expected to be deleted, with empty
        add_fcs/del_fcs since no classifier was ever attached.
        """
        with self.port(
            name='port1',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as src_port, self.port(
            name='port2',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as dst_port:
            self.host_endpoint_mapping = {
                'test': '10.0.0.1'
            }
            with self.port_pair(port_pair={
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }) as pp:
                pp_context = sfc_ctx.PortPairContext(
                    self.sfc_plugin, self.ctx,
                    pp['port_pair']
                )
                self.driver.create_port_pair(pp_context)
                with self.port_pair_group(port_pair_group={
                    'port_pairs': [pp['port_pair']['id']]
                }) as pg:
                    pg_context = sfc_ctx.PortPairGroupContext(
                        self.sfc_plugin, self.ctx,
                        pg['port_pair_group']
                    )
                    self.driver.create_port_pair_group(pg_context)
                    with self.port_chain(port_chain={
                        'name': 'test1',
                        'port_pair_groups': [pg['port_pair_group']['id']]
                    }) as pc:
                        pc_context = sfc_ctx.PortChainContext(
                            self.sfc_plugin, self.ctx,
                            pc['port_chain']
                        )
                        self.driver.create_port_chain(pc_context)
                        self.wait()
                        self.driver.delete_port_chain(pc_context)
                        self.wait()
                        delete_flow_rules = self.map_flow_rules(
                            self.rpc_calls['delete_flow_rules'])
                        flow1 = self.build_ingress_egress(
                            pc['port_chain']['id'],
                            pp['port_pair']['ingress'],
                            pp['port_pair']['egress'])
                        self.assertEqual(
                            set(delete_flow_rules.keys()),
                            {flow1})
                        # No classifier on the chain: both FC lists empty.
                        self.assertEqual(
                            delete_flow_rules[flow1]['add_fcs'], [])
                        self.assertEqual(
                            delete_flow_rules[flow1]['del_fcs'], [])
                        self.assertEqual(
                            delete_flow_rules[flow1]['node_type'],
                            'sf_node')
    def test_delete_port_chain_with_flow_classifiers(self):
        """Delete a chain with a classifier but an empty port-pair-group.

        Only the 'src_node' flow rule exists; its deletion must carry the
        full classifier match in del_fcs and nothing in add_fcs.
        """
        with self.port(
            name='port1',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as src_port:
            self.host_endpoint_mapping = {
                'test': '10.0.0.1'
            }
            # Fully-specified classifier so every del_fcs field can be
            # asserted below.
            with self.flow_classifier(flow_classifier={
                'source_port_range_min': 100,
                'source_port_range_max': 200,
                'destination_port_range_min': 300,
                'destination_port_range_max': 400,
                'ethertype': 'IPv4',
                'source_ip_prefix': '10.100.0.0/16',
                'destination_ip_prefix': '10.200.0.0/16',
                'l7_parameters': {},
                'protocol': 'tcp',
                'logical_source_port': src_port['port']['id']
            }) as fc:
                with self.port_pair_group(port_pair_group={
                    'port_pairs': []
                }) as pg:
                    pg_context = sfc_ctx.PortPairGroupContext(
                        self.sfc_plugin, self.ctx,
                        pg['port_pair_group']
                    )
                    self.driver.create_port_pair_group(pg_context)
                    with self.port_chain(port_chain={
                        'name': 'test1',
                        'port_pair_groups': [pg['port_pair_group']['id']],
                        'flow_classifiers': [fc['flow_classifier']['id']]
                    }) as pc:
                        pc_context = sfc_ctx.PortChainContext(
                            self.sfc_plugin, self.ctx,
                            pc['port_chain']
                        )
                        self.driver.create_port_chain(pc_context)
                        self.wait()
                        self.driver.delete_port_chain(pc_context)
                        self.wait()
                        delete_flow_rules = self.map_flow_rules(
                            self.rpc_calls['delete_flow_rules'])
                        flow1 = self.build_ingress_egress(
                            pc['port_chain']['id'],
                            None, src_port['port']['id'])
                        self.assertEqual(
                            set(delete_flow_rules.keys()),
                            {flow1})
                        self.assertEqual(
                            delete_flow_rules[flow1]['add_fcs'], [])
                        del_fcs = delete_flow_rules[flow1]['del_fcs']
                        self.assertEqual(len(del_fcs), 1)
                        self.assertDictContainsSubset({
                            'destination_ip_prefix': u'10.200.0.0/16',
                            'destination_port_range_max': 400,
                            'destination_port_range_min': 300,
                            'ethertype': u'IPv4',
                            'l7_parameters': {},
                            'protocol': u'tcp',
                            'source_ip_prefix': u'10.100.0.0/16',
                            'source_port_range_max': 200,
                            'source_port_range_min': 100
                        }, del_fcs[0])
                        self.assertEqual(
                            delete_flow_rules[flow1]['node_type'],
                            'src_node')
    def test_delete_port_chain_with_flow_classifiers_port_pairs(self):
        """Delete a chain that has both a classifier and a port pair.

        Both the 'src_node' and 'sf_node' flow rules must be deleted, each
        carrying the classifier in del_fcs; the src rule still points at
        the SF ingress as its next hop.
        """
        with self.port(
            name='port1',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as src_port, self.port(
            name='ingress',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as ingress, self.port(
            name='egress',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as egress:
            self.host_endpoint_mapping = {
                'test': '10.0.0.1'
            }
            with self.flow_classifier(flow_classifier={
                'source_port_range_min': 100,
                'source_port_range_max': 200,
                'destination_port_range_min': 300,
                'destination_port_range_max': 400,
                'ethertype': 'IPv4',
                'source_ip_prefix': '10.100.0.0/16',
                'destination_ip_prefix': '10.200.0.0/16',
                'l7_parameters': {},
                'protocol': 'tcp',
                'logical_source_port': src_port['port']['id']
            }) as fc:
                with self.port_pair(port_pair={
                    'ingress': ingress['port']['id'],
                    'egress': egress['port']['id']
                }) as pp:
                    pp_context = sfc_ctx.PortPairContext(
                        self.sfc_plugin, self.ctx,
                        pp['port_pair']
                    )
                    self.driver.create_port_pair(pp_context)
                    with self.port_pair_group(port_pair_group={
                        'port_pairs': [pp['port_pair']['id']]
                    }) as pg:
                        pg_context = sfc_ctx.PortPairGroupContext(
                            self.sfc_plugin, self.ctx,
                            pg['port_pair_group']
                        )
                        self.driver.create_port_pair_group(pg_context)
                        with self.port_chain(port_chain={
                            'name': 'test1',
                            'port_pair_groups': [pg['port_pair_group']['id']],
                            'flow_classifiers': [fc['flow_classifier']['id']]
                        }) as pc:
                            pc_context = sfc_ctx.PortChainContext(
                                self.sfc_plugin, self.ctx,
                                pc['port_chain']
                            )
                            self.driver.create_port_chain(pc_context)
                            self.wait()
                            # Drop the create-side RPCs so only the delete
                            # traffic is asserted below.
                            self.init_rpc_calls()
                            self.driver.delete_port_chain(pc_context)
                            self.wait()
                            delete_flow_rules = self.map_flow_rules(
                                self.rpc_calls['delete_flow_rules'])
                            flow1 = self.build_ingress_egress(
                                pc['port_chain']['id'],
                                None, src_port['port']['id']
                            )
                            flow2 = self.build_ingress_egress(
                                pc['port_chain']['id'],
                                ingress['port']['id'],
                                egress['port']['id']
                            )
                            self.assertEqual(
                                set(delete_flow_rules.keys()),
                                {flow1, flow2})
                            del_fcs = delete_flow_rules[flow1]['del_fcs']
                            self.assertEqual(len(del_fcs), 1)
                            self.assertDictContainsSubset({
                                'destination_ip_prefix': '10.200.0.0/16',
                                'destination_port_range_max': 400,
                                'destination_port_range_min': 300,
                                'ethertype': 'IPv4',
                                'l7_parameters': {},
                                'protocol': 'tcp',
                                'source_ip_prefix': '10.100.0.0/16',
                                'source_port_range_max': 200,
                                'source_port_range_min': 100
                            }, del_fcs[0])
                            next_hops = self.next_hops_info(
                                delete_flow_rules[flow1].get('next_hops'))
                            self.assertEqual(
                                next_hops,
                                {ingress['port']['mac_address']: '10.0.0.1'})
                            self.assertEqual(
                                delete_flow_rules[flow1]['node_type'],
                                'src_node')
                            del_fcs = delete_flow_rules[flow2]['del_fcs']
                            self.assertEqual(len(del_fcs), 1)
                            self.assertDictContainsSubset({
                                'destination_ip_prefix': '10.200.0.0/16',
                                'destination_port_range_max': 400,
                                'destination_port_range_min': 300,
                                'ethertype': 'IPv4',
                                'l7_parameters': {},
                                'protocol': 'tcp',
                                'source_ip_prefix': '10.100.0.0/16',
                                'source_port_range_max': 200,
                                'source_port_range_min': 100
                            }, del_fcs[0])
                            next_hops = self.next_hops_info(
                                delete_flow_rules[flow2].get('next_hops'))
                            self.assertEqual(
                                next_hops, {})
                            self.assertEqual(
                                delete_flow_rules[flow2]['node_type'],
                                'sf_node')
    def test_update_port_chain_add_port_pair(self):
        """Add a second port pair to the group of an existing chain.

        The group update must re-issue the 'src_node' rule with both
        ingress ports as next hops and emit an 'sf_node' rule for the
        newly added pair.
        """
        with self.port(
            name='port1',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as src_port, self.port(
            name='port3',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as ingress1, self.port(
            name='port4',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as egress1, self.port(
            name='port5',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as ingress2, self.port(
            name='port6',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as egress2:
            self.host_endpoint_mapping = {
                'test': '10.0.0.1'
            }
            with self.flow_classifier(flow_classifier={
                'logical_source_port': src_port['port']['id']
            }) as fc, self.port_pair(port_pair={
                'ingress': ingress1['port']['id'],
                'egress': egress1['port']['id']
            }) as pp1, self.port_pair(port_pair={
                'ingress': ingress2['port']['id'],
                'egress': egress2['port']['id']
            }) as pp2:
                pp1_context = sfc_ctx.PortPairContext(
                    self.sfc_plugin, self.ctx,
                    pp1['port_pair']
                )
                self.driver.create_port_pair(pp1_context)
                pp2_context = sfc_ctx.PortPairContext(
                    self.sfc_plugin, self.ctx,
                    pp2['port_pair']
                )
                self.driver.create_port_pair(pp2_context)
                # The group initially contains only pp1; pp2 is added by
                # the update below.
                with self.port_pair_group(port_pair_group={
                    'port_pairs': [
                        pp1['port_pair']['id'],
                    ]
                }) as pg:
                    pg_context = sfc_ctx.PortPairGroupContext(
                        self.sfc_plugin, self.ctx,
                        pg['port_pair_group']
                    )
                    self.driver.create_port_pair_group(pg_context)
                    with self.port_chain(port_chain={
                        'port_pair_groups': [pg['port_pair_group']['id']],
                        'flow_classifiers': [fc['flow_classifier']['id']]
                    }) as pc:
                        pc_context = sfc_ctx.PortChainContext(
                            self.sfc_plugin, self.ctx,
                            pc['port_chain']
                        )
                        self.driver.create_port_chain(pc_context)
                        self.wait()
                        self.init_rpc_calls()
                        updates = {
                            'port_pairs': [
                                pp1['port_pair']['id'],
                                pp2['port_pair']['id']
                            ]
                        }
                        # Update through the API so the DB reflects the new
                        # membership before invoking the driver.
                        req = self.new_update_request(
                            'port_pair_groups', {'port_pair_group': updates},
                            pg['port_pair_group']['id']
                        )
                        res = req.get_response(self.ext_api)
                        pg2 = self.deserialize(
                            self.fmt, res
                        )
                        pg2['port_pair_group']['port_chains'] = [
                            pc['port_chain']['id']
                        ]
                        pg_context = sfc_ctx.PortPairGroupContext(
                            self.sfc_plugin, self.ctx,
                            pg2['port_pair_group'],
                            original_portpairgroup=pg['port_pair_group']
                        )
                        self.driver.update_port_pair_group(pg_context)
                        self.wait()
                        update_flow_rules = self.map_flow_rules(
                            self.rpc_calls['update_flow_rules'])
                        flow1 = self.build_ingress_egress(
                            pc['port_chain']['id'],
                            None, src_port['port']['id'])
                        flow2 = self.build_ingress_egress(
                            pc['port_chain']['id'],
                            ingress2['port']['id'],
                            egress2['port']['id'])
                        self.assertEqual(
                            set(update_flow_rules.keys()),
                            {flow1, flow2})
                        add_fcs = update_flow_rules[flow1]['add_fcs']
                        self.assertEqual(len(add_fcs), 1)
                        ip_src = (
                            src_port['port']['fixed_ips'][0]['ip_address']
                        )
                        self.assertDictContainsSubset({
                            'destination_ip_prefix': None,
                            'destination_port_range_max': None,
                            'destination_port_range_min': None,
                            'ethertype': u'IPv4',
                            'l7_parameters': {},
                            'protocol': None,
                            'source_ip_prefix': ip_src,
                            'source_port_range_max': None,
                            'source_port_range_min': None
                        }, add_fcs[0])
                        next_hops = self.next_hops_info(
                            update_flow_rules[flow1].get('next_hops'))
                        # Both port pairs now serve the source node.
                        self.assertEqual(
                            next_hops, {
                                ingress1['port']['mac_address']: '10.0.0.1',
                                ingress2['port']['mac_address']: '10.0.0.1'
                            }
                        )
                        self.assertEqual(
                            update_flow_rules[flow1]['node_type'],
                            'src_node')
                        add_fcs = update_flow_rules[flow2]['add_fcs']
                        self.assertEqual(len(add_fcs), 1)
                        self.assertDictContainsSubset({
                            'destination_ip_prefix': None,
                            'destination_port_range_max': None,
                            'destination_port_range_min': None,
                            'ethertype': u'IPv4',
                            'l7_parameters': {},
                            'protocol': None,
                            'source_ip_prefix': ip_src,
                            'source_port_range_max': None,
                            'source_port_range_min': None
                        }, add_fcs[0])
                        next_hops = self.next_hops_info(
                            update_flow_rules[flow2].get('next_hops'))
                        self.assertEqual(
                            next_hops, {}
                        )
                        self.assertEqual(
                            update_flow_rules[flow2]['node_type'],
                            'sf_node')
    def test_update_port_chain_delete_port_pair(self):
        """Remove one of two port pairs from an existing chain's group.

        The update must delete the old 'src_node' rule (next hops over
        both ingress ports) and pp2's 'sf_node' rule, then re-issue the
        'src_node' rule with only pp1's ingress as a next hop.
        """
        with self.port(
            name='port1',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as src_port, self.port(
            name='port3',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as ingress1, self.port(
            name='port4',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as egress1, self.port(
            name='port5',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as ingress2, self.port(
            name='port6',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as egress2:
            self.host_endpoint_mapping = {
                'test': '10.0.0.1'
            }
            with self.flow_classifier(flow_classifier={
                'logical_source_port': src_port['port']['id']
            }) as fc, self.port_pair(port_pair={
                'ingress': ingress1['port']['id'],
                'egress': egress1['port']['id']
            }) as pp1, self.port_pair(port_pair={
                'ingress': ingress2['port']['id'],
                'egress': egress2['port']['id']
            }) as pp2:
                pp1_context = sfc_ctx.PortPairContext(
                    self.sfc_plugin, self.ctx,
                    pp1['port_pair']
                )
                self.driver.create_port_pair(pp1_context)
                pp2_context = sfc_ctx.PortPairContext(
                    self.sfc_plugin, self.ctx,
                    pp2['port_pair']
                )
                self.driver.create_port_pair(pp2_context)
                # Group starts with both pairs; pp2 is removed below.
                with self.port_pair_group(port_pair_group={
                    'port_pairs': [
                        pp1['port_pair']['id'],
                        pp2['port_pair']['id']
                    ]
                }) as pg:
                    pg_context = sfc_ctx.PortPairGroupContext(
                        self.sfc_plugin, self.ctx,
                        pg['port_pair_group']
                    )
                    self.driver.create_port_pair_group(pg_context)
                    with self.port_chain(port_chain={
                        'port_pair_groups': [pg['port_pair_group']['id']],
                        'flow_classifiers': [fc['flow_classifier']['id']]
                    }) as pc:
                        pc_context = sfc_ctx.PortChainContext(
                            self.sfc_plugin, self.ctx,
                            pc['port_chain']
                        )
                        self.driver.create_port_chain(pc_context)
                        self.wait()
                        self.init_rpc_calls()
                        updates = {
                            'port_pairs': [
                                pp1['port_pair']['id']
                            ]
                        }
                        # Update through the API so the DB reflects the new
                        # membership before invoking the driver.
                        req = self.new_update_request(
                            'port_pair_groups', {'port_pair_group': updates},
                            pg['port_pair_group']['id']
                        )
                        res = req.get_response(self.ext_api)
                        pg2 = self.deserialize(
                            self.fmt, res
                        )
                        pg2['port_pair_group']['port_chains'] = [
                            pc['port_chain']['id']
                        ]
                        pg_context = sfc_ctx.PortPairGroupContext(
                            self.sfc_plugin, self.ctx,
                            pg2['port_pair_group'],
                            original_portpairgroup=pg['port_pair_group']
                        )
                        self.driver.update_port_pair_group(pg_context)
                        self.wait()
                        delete_flow_rules = self.map_flow_rules(
                            self.rpc_calls['delete_flow_rules'])
                        update_flow_rules = self.map_flow_rules(
                            self.rpc_calls['update_flow_rules'])
                        flow1 = self.build_ingress_egress(
                            pc['port_chain']['id'],
                            None, src_port['port']['id'])
                        flow2 = self.build_ingress_egress(
                            pc['port_chain']['id'],
                            ingress2['port']['id'],
                            egress2['port']['id'])
                        self.assertEqual(
                            set(delete_flow_rules.keys()),
                            {flow1, flow2})
                        del_fcs = delete_flow_rules[flow1]['del_fcs']
                        self.assertEqual(len(del_fcs), 1)
                        ip_src = (
                            src_port['port']['fixed_ips'][0]['ip_address']
                        )
                        self.assertDictContainsSubset({
                            'destination_ip_prefix': None,
                            'destination_port_range_max': None,
                            'destination_port_range_min': None,
                            'ethertype': u'IPv4',
                            'l7_parameters': {},
                            'protocol': None,
                            'source_ip_prefix': ip_src,
                            'source_port_range_max': None,
                            'source_port_range_min': None
                        }, del_fcs[0])
                        next_hops = self.next_hops_info(
                            delete_flow_rules[flow1].get('next_hops'))
                        # The deleted src rule still referenced both pairs.
                        self.assertEqual(
                            next_hops, {
                                ingress1['port']['mac_address']: '10.0.0.1',
                                ingress2['port']['mac_address']: '10.0.0.1'
                            }
                        )
                        self.assertEqual(
                            delete_flow_rules[flow1]['node_type'],
                            'src_node')
                        del_fcs = delete_flow_rules[flow2]['del_fcs']
                        self.assertEqual(len(del_fcs), 1)
                        self.assertDictContainsSubset({
                            'destination_ip_prefix': None,
                            'destination_port_range_max': None,
                            'destination_port_range_min': None,
                            'ethertype': u'IPv4',
                            'l7_parameters': {},
                            'protocol': None,
                            'source_ip_prefix': ip_src,
                            'source_port_range_max': None,
                            'source_port_range_min': None
                        }, del_fcs[0])
                        next_hops = self.next_hops_info(
                            delete_flow_rules[flow2].get('next_hops'))
                        self.assertEqual(
                            next_hops, {
                            }
                        )
                        self.assertEqual(
                            delete_flow_rules[flow2]['node_type'],
                            'sf_node')
                        self.assertEqual(
                            set(update_flow_rules.keys()),
                            {flow1})
                        add_fcs = update_flow_rules[flow1]['add_fcs']
                        self.assertEqual(len(add_fcs), 1)
                        self.assertDictContainsSubset({
                            'destination_ip_prefix': None,
                            'destination_port_range_max': None,
                            'destination_port_range_min': None,
                            'ethertype': u'IPv4',
                            'l7_parameters': {},
                            'protocol': None,
                            'source_ip_prefix': ip_src,
                            'source_port_range_max': None,
                            'source_port_range_min': None
                        }, add_fcs[0])
                        next_hops = self.next_hops_info(
                            update_flow_rules[flow1].get('next_hops'))
                        # Re-issued src rule points only at the remaining
                        # port pair.
                        self.assertEqual(
                            next_hops,
                            {ingress1['port']['mac_address']: '10.0.0.1'}
                        )
                        self.assertEqual(
                            update_flow_rules[flow1]['node_type'],
                            'src_node')
    def test_update_port_chain_replace_port_pair(self):
        """Replace the single port pair of an existing chain's group.

        The update must delete the old 'src_node' rule and pp1's
        'sf_node' rule, then issue a new 'src_node' rule pointing at
        pp2's ingress and a new 'sf_node' rule for pp2.
        """
        with self.port(
            name='port1',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as src_port, self.port(
            name='port3',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as ingress1, self.port(
            name='port4',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as egress1, self.port(
            name='port5',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as ingress2, self.port(
            name='port6',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as egress2:
            self.host_endpoint_mapping = {
                'test': '10.0.0.1'
            }
            with self.flow_classifier(flow_classifier={
                'logical_source_port': src_port['port']['id']
            }) as fc, self.port_pair(port_pair={
                'ingress': ingress1['port']['id'],
                'egress': egress1['port']['id']
            }) as pp1, self.port_pair(port_pair={
                'ingress': ingress2['port']['id'],
                'egress': egress2['port']['id']
            }) as pp2:
                pp1_context = sfc_ctx.PortPairContext(
                    self.sfc_plugin, self.ctx,
                    pp1['port_pair']
                )
                self.driver.create_port_pair(pp1_context)
                pp2_context = sfc_ctx.PortPairContext(
                    self.sfc_plugin,
                    self.ctx, pp2['port_pair']
                )
                self.driver.create_port_pair(pp2_context)
                # Group starts with pp1 only; the update swaps it for pp2.
                with self.port_pair_group(port_pair_group={
                    'port_pairs': [
                        pp1['port_pair']['id']
                    ]
                }) as pg:
                    pg_context = sfc_ctx.PortPairGroupContext(
                        self.sfc_plugin, self.ctx,
                        pg['port_pair_group']
                    )
                    self.driver.create_port_pair_group(pg_context)
                    with self.port_chain(port_chain={
                        'port_pair_groups': [pg['port_pair_group']['id']],
                        'flow_classifiers': [fc['flow_classifier']['id']]
                    }) as pc:
                        pc_context = sfc_ctx.PortChainContext(
                            self.sfc_plugin, self.ctx,
                            pc['port_chain']
                        )
                        self.driver.create_port_chain(pc_context)
                        self.wait()
                        self.init_rpc_calls()
                        updates = {
                            'port_pairs': [
                                pp2['port_pair']['id']
                            ]
                        }
                        # Update through the API so the DB reflects the new
                        # membership before invoking the driver.
                        req = self.new_update_request(
                            'port_pair_groups', {'port_pair_group': updates},
                            pg['port_pair_group']['id']
                        )
                        res = req.get_response(self.ext_api)
                        pg2 = self.deserialize(
                            self.fmt, res
                        )
                        pg2['port_pair_group']['port_chains'] = [
                            pc['port_chain']['id']
                        ]
                        pg_context = sfc_ctx.PortPairGroupContext(
                            self.sfc_plugin, self.ctx,
                            pg2['port_pair_group'],
                            original_portpairgroup=pg['port_pair_group']
                        )
                        self.driver.update_port_pair_group(pg_context)
                        self.wait()
                        delete_flow_rules = self.map_flow_rules(
                            self.rpc_calls['delete_flow_rules'])
                        update_flow_rules = self.map_flow_rules(
                            self.rpc_calls['update_flow_rules'])
                        flow1 = self.build_ingress_egress(
                            pc['port_chain']['id'],
                            None, src_port['port']['id'])
                        flow2 = self.build_ingress_egress(
                            pc['port_chain']['id'],
                            ingress1['port']['id'],
                            egress1['port']['id'])
                        flow3 = self.build_ingress_egress(
                            pc['port_chain']['id'],
                            ingress2['port']['id'],
                            egress2['port']['id'])
                        self.assertEqual(
                            set(delete_flow_rules.keys()),
                            {flow1, flow2})
                        del_fcs = delete_flow_rules[flow1]['del_fcs']
                        self.assertEqual(len(del_fcs), 1)
                        ip_src = (
                            src_port['port']['fixed_ips'][0]['ip_address']
                        )
                        self.assertDictContainsSubset({
                            'destination_ip_prefix': None,
                            'destination_port_range_max': None,
                            'destination_port_range_min': None,
                            'ethertype': u'IPv4',
                            'l7_parameters': {},
                            'protocol': None,
                            'source_ip_prefix': ip_src,
                            'source_port_range_max': None,
                            'source_port_range_min': None
                        }, del_fcs[0])
                        next_hops = self.next_hops_info(
                            delete_flow_rules[flow1].get('next_hops'))
                        # Old src rule pointed at the replaced pair.
                        self.assertEqual(
                            next_hops, {
                                ingress1['port']['mac_address']: '10.0.0.1',
                            }
                        )
                        self.assertEqual(
                            delete_flow_rules[flow1]['node_type'],
                            'src_node')
                        del_fcs = delete_flow_rules[flow2]['del_fcs']
                        self.assertEqual(len(del_fcs), 1)
                        self.assertDictContainsSubset({
                            'destination_ip_prefix': None,
                            'destination_port_range_max': None,
                            'destination_port_range_min': None,
                            'ethertype': u'IPv4',
                            'l7_parameters': {},
                            'protocol': None,
                            'source_ip_prefix': ip_src,
                            'source_port_range_max': None,
                            'source_port_range_min': None
                        }, del_fcs[0])
                        next_hops = self.next_hops_info(
                            delete_flow_rules[flow2].get('next_hops'))
                        self.assertEqual(
                            next_hops, {
                            }
                        )
                        self.assertEqual(
                            delete_flow_rules[flow2]['node_type'],
                            'sf_node')
                        self.assertEqual(
                            set(update_flow_rules.keys()),
                            {flow1, flow3})
                        add_fcs = update_flow_rules[flow1]['add_fcs']
                        self.assertEqual(len(add_fcs), 1)
                        self.assertDictContainsSubset({
                            'destination_ip_prefix': None,
                            'destination_port_range_max': None,
                            'destination_port_range_min': None,
                            'ethertype': u'IPv4',
                            'l7_parameters': {},
                            'protocol': None,
                            'source_ip_prefix': ip_src,
                            'source_port_range_max': None,
                            'source_port_range_min': None
                        }, add_fcs[0])
                        next_hops = self.next_hops_info(
                            update_flow_rules[flow1].get('next_hops'))
                        # New src rule points at the replacement pair.
                        self.assertEqual(
                            next_hops,
                            {ingress2['port']['mac_address']: '10.0.0.1'}
                        )
                        self.assertEqual(
                            update_flow_rules[flow1]['node_type'],
                            'src_node')
                        add_fcs = update_flow_rules[flow3]['add_fcs']
                        self.assertEqual(len(add_fcs), 1)
                        self.assertDictContainsSubset({
                            'destination_ip_prefix': None,
                            'destination_port_range_max': None,
                            'destination_port_range_min': None,
                            'ethertype': u'IPv4',
                            'l7_parameters': {},
                            'protocol': None,
                            'source_ip_prefix': ip_src,
                            'source_port_range_max': None,
                            'source_port_range_min': None
                        }, add_fcs[0])
                        next_hops = self.next_hops_info(
                            update_flow_rules[flow3].get('next_hops'))
                        self.assertEqual(
                            next_hops, {}
                        )
                        self.assertEqual(
                            update_flow_rules[flow3]['node_type'],
                            'sf_node')
update_flow_rules[flow3].get('next_hops')) self.assertEqual( next_hops, {} ) self.assertEqual( update_flow_rules[flow3]['node_type'], 'sf_node') def test_update_port_chain_replace_flow_classifier(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port1, self.port( name='port3', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port2, self.port( name='port5', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress, self.port( name='port6', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress: self.host_endpoint_mapping = { 'test': '10.0.0.1' } with self.flow_classifier(flow_classifier={ 'logical_source_port': src_port1['port']['id'] }) as fc1, self.flow_classifier(flow_classifier={ 'logical_source_port': src_port2['port']['id'] }) as fc2, self.port_pair(port_pair={ 'ingress': ingress['port']['id'], 'egress': egress['port']['id'] }) as pp: pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp['port_pair'] ) self.driver.create_port_pair(pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [ pp['port_pair']['id'] ] }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [ fc1['flow_classifier']['id'] ] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() self.init_rpc_calls() updates = { 'flow_classifiers': [ fc2['flow_classifier']['id'] ] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) res 
= req.get_response(self.ext_api) pc2 = self.deserialize( self.fmt, res ) pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc2['port_chain'], original_portchain=pc['port_chain'] ) self.driver.update_port_chain(pc_context) self.wait() delete_flow_rules = self.map_flow_rules( self.rpc_calls['delete_flow_rules']) update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port1['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port2['port']['id']) flow3 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], egress['port']['id']) self.assertEqual( set(delete_flow_rules.keys()), {flow1, flow3}) del_fcs = delete_flow_rules[flow1]['del_fcs'] self.assertEqual(len(del_fcs), 1) ip_src1 = ( src_port1['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src1, 'source_port_range_max': None, 'source_port_range_min': None }, del_fcs[0]) next_hops = self.next_hops_info( delete_flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, { ingress['port']['mac_address']: '10.0.0.1', } ) self.assertEqual( delete_flow_rules[flow1]['node_type'], 'src_node') del_fcs = delete_flow_rules[flow3]['del_fcs'] self.assertEqual(len(del_fcs), 1) ip_src2 = ( src_port2['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src1, 'source_port_range_max': None, 'source_port_range_min': None }, del_fcs[0]) next_hops = self.next_hops_info( delete_flow_rules[flow3].get('next_hops')) self.assertEqual( next_hops, {} ) self.assertEqual( 
delete_flow_rules[flow3]['node_type'], 'sf_node') self.assertEqual( set(update_flow_rules.keys()), {flow2, flow3}) add_fcs = update_flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src2, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, {ingress['port']['mac_address']: '10.0.0.1'} ) self.assertEqual( update_flow_rules[flow2]['node_type'], 'src_node') add_fcs = update_flow_rules[flow3]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src2, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow3].get('next_hops')) self.assertEqual( next_hops, {} ) self.assertEqual( update_flow_rules[flow3]['node_type'], 'sf_node') def test_update_port_chain_add_port_pair_group(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='port5', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress1, self.port( name='port6', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress1, self.port( name='port7', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress2, self.port( name='port8', device_owner='compute', 
device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress2: self.host_endpoint_mapping = { 'test': '10.0.0.1' } with self.flow_classifier(flow_classifier={ 'logical_source_port': src_port['port']['id'] }) as fc, self.port_pair(port_pair={ 'ingress': ingress1['port']['id'], 'egress': egress1['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': ingress2['port']['id'], 'egress': egress2['port']['id'] }) as pp2: pp1_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp1['port_pair'] ) self.driver.create_port_pair(pp1_context) pp2_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp2['port_pair'] ) self.driver.create_port_pair(pp2_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [ pp1['port_pair']['id'] ] }) as pg1, self.port_pair_group(port_pair_group={ 'port_pairs': [ pp2['port_pair']['id'] ] }) as pg2: pg1_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg1['port_pair_group'] ) self.driver.create_port_pair_group(pg1_context) pg2_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg2['port_pair_group'] ) self.driver.create_port_pair_group(pg2_context) with self.port_chain(port_chain={ 'port_pair_groups': [pg1['port_pair_group']['id']], 'flow_classifiers': [ fc['flow_classifier']['id'] ] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() self.init_rpc_calls() updates = { 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'] ] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) res = req.get_response(self.ext_api) pc2 = self.deserialize( self.fmt, res ) pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc2['port_chain'], original_portchain=pc['port_chain'] ) self.driver.update_port_chain(pc_context) self.wait() delete_flow_rules = self.map_flow_rules( 
self.rpc_calls['delete_flow_rules']) update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress1['port']['id'], egress1['port']['id']) flow3 = self.build_ingress_egress( pc['port_chain']['id'], ingress2['port']['id'], egress2['port']['id']) self.assertEqual( set(delete_flow_rules.keys()), {flow1, flow2}) del_fcs = delete_flow_rules[flow1]['del_fcs'] self.assertEqual(len(del_fcs), 1) ip_src = ( src_port['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, del_fcs[0]) next_hops = self.next_hops_info( delete_flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, { ingress1['port']['mac_address']: '10.0.0.1', } ) self.assertEqual( delete_flow_rules[flow1]['node_type'], 'src_node') del_fcs = delete_flow_rules[flow2]['del_fcs'] self.assertEqual(len(del_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, del_fcs[0]) next_hops = self.next_hops_info( delete_flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, { } ) self.assertEqual( delete_flow_rules[flow2]['node_type'], 'sf_node') self.assertEqual( set(update_flow_rules.keys()), {flow1, flow2, flow3}) add_fcs = update_flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 
'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, { ingress1['port']['mac_address']: '10.0.0.1' } ) self.assertEqual( update_flow_rules[flow1]['node_type'], 'src_node') add_fcs = update_flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, { ingress2['port']['mac_address']: '10.0.0.1' } ) self.assertEqual( update_flow_rules[flow2]['node_type'], 'sf_node') add_fcs = update_flow_rules[flow3]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow3].get('next_hops')) self.assertEqual( next_hops, {} ) self.assertEqual( update_flow_rules[flow3]['node_type'], 'sf_node') def test_update_port_chain_delete_port_pair_group(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='port5', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress1, self.port( name='port6', device_owner='compute', device_id='test', arg_list=( 
portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress1, self.port( name='port7', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress2, self.port( name='port8', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress2: self.host_endpoint_mapping = { 'test': '10.0.0.1' } with self.flow_classifier(flow_classifier={ 'logical_source_port': src_port['port']['id'] }) as fc, self.port_pair(port_pair={ 'ingress': ingress1['port']['id'], 'egress': egress1['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': ingress2['port']['id'], 'egress': egress2['port']['id'] }) as pp2: pp1_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp1['port_pair'] ) self.driver.create_port_pair(pp1_context) pp2_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp2['port_pair'] ) self.driver.create_port_pair(pp2_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [ pp1['port_pair']['id'] ] }) as pg1, self.port_pair_group(port_pair_group={ 'port_pairs': [ pp2['port_pair']['id'] ] }) as pg2: pg1_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg1['port_pair_group'] ) self.driver.create_port_pair_group(pg1_context) pg2_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg2['port_pair_group'] ) self.driver.create_port_pair_group(pg2_context) with self.port_chain(port_chain={ 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'] ], 'flow_classifiers': [ fc['flow_classifier']['id'] ] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() self.init_rpc_calls() updates = { 'port_pair_groups': [ pg1['port_pair_group']['id'] ] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) res = 
req.get_response(self.ext_api) pc2 = self.deserialize( self.fmt, res ) pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc2['port_chain'], original_portchain=pc['port_chain'] ) self.driver.update_port_chain(pc_context) self.wait() delete_flow_rules = self.map_flow_rules( self.rpc_calls['delete_flow_rules']) update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress1['port']['id'], egress1['port']['id']) flow3 = self.build_ingress_egress( pc['port_chain']['id'], ingress2['port']['id'], egress2['port']['id']) self.assertEqual( set(delete_flow_rules.keys()), {flow1, flow2, flow3}) del_fcs = delete_flow_rules[flow1]['del_fcs'] self.assertEqual(len(del_fcs), 1) ip_src = ( src_port['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, del_fcs[0]) next_hops = self.next_hops_info( delete_flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, { ingress1['port']['mac_address']: '10.0.0.1', } ) self.assertEqual( delete_flow_rules[flow1]['node_type'], 'src_node') del_fcs = delete_flow_rules[flow2]['del_fcs'] self.assertEqual(len(del_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, del_fcs[0]) next_hops = self.next_hops_info( delete_flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, { ingress2['port']['mac_address']: '10.0.0.1', } ) self.assertEqual( 
delete_flow_rules[flow2]['node_type'], 'sf_node') del_fcs = delete_flow_rules[flow3]['del_fcs'] self.assertEqual(len(del_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, del_fcs[0]) next_hops = self.next_hops_info( delete_flow_rules[flow3].get('next_hops')) self.assertEqual( next_hops, { } ) self.assertEqual( delete_flow_rules[flow3]['node_type'], 'sf_node') self.assertEqual( set(update_flow_rules.keys()), {flow1, flow2}) add_fcs = update_flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, { ingress1['port']['mac_address']: '10.0.0.1' } ) self.assertEqual( update_flow_rules[flow1]['node_type'], 'src_node') add_fcs = update_flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, { } ) self.assertEqual( update_flow_rules[flow2]['node_type'], 'sf_node') def test_update_port_chain_replace_port_pair_group(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, 
), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='port5', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress1, self.port( name='port6', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress1, self.port( name='port7', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress2, self.port( name='port8', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress2: self.host_endpoint_mapping = { 'test': '10.0.0.1' } with self.flow_classifier(flow_classifier={ 'logical_source_port': src_port['port']['id'] }) as fc, self.port_pair(port_pair={ 'ingress': ingress1['port']['id'], 'egress': egress1['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': ingress2['port']['id'], 'egress': egress2['port']['id'] }) as pp2: pp1_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp1['port_pair'] ) self.driver.create_port_pair(pp1_context) pp2_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp2['port_pair'] ) self.driver.create_port_pair(pp2_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [ pp1['port_pair']['id'] ] }) as pg1, self.port_pair_group(port_pair_group={ 'port_pairs': [ pp2['port_pair']['id'] ] }) as pg2: pg1_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg1['port_pair_group'] ) self.driver.create_port_pair_group(pg1_context) pg2_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg2['port_pair_group'] ) self.driver.create_port_pair_group(pg2_context) with self.port_chain(port_chain={ 'port_pair_groups': [pg1['port_pair_group']['id']], 'flow_classifiers': [ fc['flow_classifier']['id'] ] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) 
self.driver.create_port_chain(pc_context) self.wait() self.init_rpc_calls() updates = { 'port_pair_groups': [ pg2['port_pair_group']['id'] ] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) res = req.get_response(self.ext_api) pc2 = self.deserialize( self.fmt, res ) pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc2['port_chain'], original_portchain=pc['port_chain'] ) self.driver.update_port_chain(pc_context) self.wait() delete_flow_rules = self.map_flow_rules( self.rpc_calls['delete_flow_rules']) update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress1['port']['id'], egress1['port']['id']) flow3 = self.build_ingress_egress( pc['port_chain']['id'], ingress2['port']['id'], egress2['port']['id']) self.assertEqual( set(delete_flow_rules.keys()), {flow1, flow2}) del_fcs = delete_flow_rules[flow1]['del_fcs'] self.assertEqual(len(del_fcs), 1) ip_src = ( src_port['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, del_fcs[0]) next_hops = self.next_hops_info( delete_flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, { ingress1['port']['mac_address']: '10.0.0.1', } ) self.assertEqual( delete_flow_rules[flow1]['node_type'], 'src_node') del_fcs = delete_flow_rules[flow2]['del_fcs'] self.assertEqual(len(del_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 
'source_port_range_max': None, 'source_port_range_min': None }, del_fcs[0]) next_hops = self.next_hops_info( delete_flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, { } ) self.assertEqual( delete_flow_rules[flow2]['node_type'], 'sf_node') self.assertEqual( set(update_flow_rules.keys()), {flow1, flow3}) add_fcs = update_flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, { ingress2['port']['mac_address']: '10.0.0.1' } ) self.assertEqual( update_flow_rules[flow1]['node_type'], 'src_node') add_fcs = update_flow_rules[flow3]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow3].get('next_hops')) self.assertEqual( next_hops, {} ) self.assertEqual( update_flow_rules[flow3]['node_type'], 'sf_node') def test_agent_init_port_pairs(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='port2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as dst_port: self.host_endpoint_mapping = { 'test': '10.0.0.1' } with self.port_pair(port_pair={ 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }) as pp: pp_context 
= sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp['port_pair'] ) self.driver.create_port_pair(pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp['port_pair']['id']] }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() flow_rules = [] flow_rules_by_portid = {} for host, portid in [( src_port['port']['binding:host_id'], src_port['port']['id'] ), ( dst_port['port']['binding:host_id'], dst_port['port']['id'] )]: flow_rules_by_portid[ portid ] = self.driver.get_flowrules_by_host_portid( self.ctx, host=host, port_id=portid ) flow_rules = self.map_flow_rules( flow_rules, *(flow_rules_by_portid.values()) ) flow1 = self.build_ingress_egress( pc['port_chain']['id'], pp['port_pair']['ingress'], pp['port_pair']['egress']) self.assertEqual( set(flow_rules.keys()), {flow1}) self.assertEqual( flow_rules[flow1]['add_fcs'], []) self.assertEqual( flow_rules[flow1]['del_fcs'], []) self.assertEqual( flow_rules[flow1]['node_type'], 'sf_node') def test_agent_init_flow_classifiers(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port: self.host_endpoint_mapping = { 'test': '10.0.0.1' } with self.flow_classifier(flow_classifier={ 'logical_source_port': src_port['port']['id'] }) as fc: with self.port_pair_group(port_pair_group={ 'port_pairs': [] }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': 
[fc['flow_classifier']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() flow_rules = [] flow_rules_by_portid = {} for host, portid in [( src_port['port']['binding:host_id'], src_port['port']['id'] )]: flow_rules_by_portid[ portid ] = self.driver.get_flowrules_by_host_portid( self.ctx, host=host, port_id=portid ) flow_rules = self.map_flow_rules( flow_rules, *(flow_rules_by_portid.values()) ) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) self.assertEqual( set(flow_rules.keys()), {flow1}) add_fcs = flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) ip_src = ( src_port['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) self.assertEqual( flow_rules[flow1]['del_fcs'], []) self.assertEqual( flow_rules[flow1]['node_type'], 'src_node') self.assertIsNone( flow_rules[flow1].get('next_hops') ) self.assertIsNotNone( flow_rules[flow1]['next_group_id'] ) def test_agent_init_flow_classifiers_port_pairs(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress, self.port( name='egress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress: self.host_endpoint_mapping = { 'test': '10.0.0.1', } with self.flow_classifier(flow_classifier={ 'logical_source_port': src_port['port']['id'] }) as fc: with 
self.port_pair(port_pair={ 'ingress': ingress['port']['id'], 'egress': egress['port']['id'] }) as pp: pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp['port_pair'] ) self.driver.create_port_pair(pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp['port_pair']['id']] }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [fc['flow_classifier']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() flow_rules = [] flow_rules_by_portid = {} for host, portid in [( src_port['port']['binding:host_id'], src_port['port']['id'] ), ( ingress['port']['binding:host_id'], ingress['port']['id'] ), ( egress['port']['binding:host_id'], egress['port']['id'] )]: flow_rules_by_portid[ portid ] = self.driver.get_flowrules_by_host_portid( self.ctx, host=host, port_id=portid ) flow_rules = self.map_flow_rules( flow_rules, *(flow_rules_by_portid.values()) ) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id'] ) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], egress['port']['id'] ) self.assertEqual( set(flow_rules.keys()), {flow1, flow2}) add_fcs = flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) ip_src = ( src_port['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, 
{ingress['port']['mac_address']: '10.0.0.1'}) self.assertEqual( flow_rules[flow1]['node_type'], 'src_node') add_fcs = flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, {}) self.assertEqual( flow_rules[flow2]['node_type'], 'sf_node') def test_agent_init_multi_port_groups_port_pairs(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress1, self.port( name='egress1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress1, self.port( name='ingress2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress2, self.port( name='egress2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress2, self.port( name='ingress3', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress3, self.port( name='egress3', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress3: self.host_endpoint_mapping = { 'test': '10.0.0.1' } with self.flow_classifier(flow_classifier={ 'logical_source_port': src_port['port']['id'] }) as fc: with self.port_pair(port_pair={ 'ingress': ingress1['port']['id'], 
'egress': egress1['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': ingress2['port']['id'], 'egress': egress2['port']['id'] }) as pp2, self.port_pair(port_pair={ 'ingress': ingress3['port']['id'], 'egress': egress3['port']['id'] }) as pp3: pp1_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp1['port_pair'] ) self.driver.create_port_pair(pp1_context) pp2_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp2['port_pair'] ) self.driver.create_port_pair(pp2_context) pp3_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp3['port_pair'] ) self.driver.create_port_pair(pp3_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp1['port_pair']['id']] }) as pg1, self.port_pair_group(port_pair_group={ 'port_pairs': [pp2['port_pair']['id']] }) as pg2, self.port_pair_group(port_pair_group={ 'port_pairs': [pp3['port_pair']['id']] }) as pg3: pg1_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg1['port_pair_group'] ) self.driver.create_port_pair_group(pg1_context) pg2_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg2['port_pair_group'] ) self.driver.create_port_pair_group(pg2_context) pg3_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg3['port_pair_group'] ) self.driver.create_port_pair_group(pg3_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'], pg3['port_pair_group']['id'] ], 'flow_classifiers': [fc['flow_classifier']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() flow_rules = [] flow_rules_by_portid = {} for host, portid in [( src_port['port']['binding:host_id'], src_port['port']['id'] ), ( ingress1['port']['binding:host_id'], ingress1['port']['id'] ), ( egress1['port']['binding:host_id'], egress1['port']['id'] ), ( ingress2['port']['binding:host_id'], 
ingress2['port']['id'] ), ( egress2['port']['binding:host_id'], egress2['port']['id'] ), ( ingress3['port']['binding:host_id'], ingress3['port']['id'] ), ( egress3['port']['binding:host_id'], egress3['port']['id'] )]: flow_rules_by_portid[ portid ] = self.driver.get_flowrules_by_host_portid( self.ctx, host=host, port_id=portid ) flow_rules = self.map_flow_rules( flow_rules, *(flow_rules_by_portid.values()) ) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress1['port']['id'], egress1['port']['id']) flow3 = self.build_ingress_egress( pc['port_chain']['id'], ingress2['port']['id'], egress2['port']['id']) flow4 = self.build_ingress_egress( pc['port_chain']['id'], ingress3['port']['id'], egress3['port']['id']) self.assertEqual( set(flow_rules.keys()), {flow1, flow2, flow3, flow4}) add_fcs = flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) ip_src = ( src_port['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( flow_rules[flow1].get('next_hops') ) self.assertEqual( next_hops, {ingress1['port']['mac_address']: '10.0.0.1'}) self.assertEqual( flow_rules[flow1]['node_type'], 'src_node') add_fcs = flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( flow_rules[flow2].get('next_hops')) self.assertEqual( 
next_hops, {ingress2['port']['mac_address']: '10.0.0.1'}) self.assertEqual( flow_rules[flow2]['node_type'], 'sf_node') add_fcs = flow_rules[flow3]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( flow_rules[flow3].get('next_hops')) self.assertEqual( next_hops, {ingress3['port']['mac_address']: '10.0.0.1'}) self.assertEqual( flow_rules[flow3]['node_type'], 'sf_node') add_fcs = flow_rules[flow4]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( flow_rules[flow4].get('next_hops')) self.assertEqual( next_hops, {}) self.assertEqual( flow_rules[flow3]['node_type'], 'sf_node') def test_agent_init_port_groups_multi_port_pairs(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress1, self.port( name='egress1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress1, self.port( name='ingress2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress2, self.port( name='egress2', device_owner='compute', device_id='test', arg_list=( 
portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress2, self.port( name='ingress3', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress3, self.port( name='egress3', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress3: self.host_endpoint_mapping = { 'test': '10.0.0.1' } with self.flow_classifier(flow_classifier={ 'logical_source_port': src_port['port']['id'] }) as fc: with self.port_pair(port_pair={ 'ingress': ingress1['port']['id'], 'egress': egress1['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': ingress2['port']['id'], 'egress': egress2['port']['id'] }) as pp2, self.port_pair(port_pair={ 'ingress': ingress3['port']['id'], 'egress': egress3['port']['id'] }) as pp3: pp1_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp1['port_pair'] ) self.driver.create_port_pair(pp1_context) pp2_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp2['port_pair'] ) self.driver.create_port_pair(pp2_context) pp3_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp3['port_pair'] ) self.driver.create_port_pair(pp3_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [ pp1['port_pair']['id'], pp2['port_pair']['id'], pp3['port_pair']['id'] ] }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [ pg['port_pair_group']['id'] ], 'flow_classifiers': [fc['flow_classifier']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() flow_rules = [] flow_rules_by_portid = {} for host, portid in [( src_port['port']['binding:host_id'], src_port['port']['id'] ), ( ingress1['port']['binding:host_id'], 
ingress1['port']['id'] ), ( egress1['port']['binding:host_id'], egress1['port']['id'] ), ( ingress2['port']['binding:host_id'], ingress2['port']['id'] ), ( egress2['port']['binding:host_id'], egress2['port']['id'] ), ( ingress3['port']['binding:host_id'], ingress3['port']['id'] ), ( egress3['port']['binding:host_id'], egress3['port']['id'] )]: flow_rules_by_portid[ portid ] = self.driver.get_flowrules_by_host_portid( self.ctx, host=host, port_id=portid ) flow_rules = self.map_flow_rules( flow_rules, *(flow_rules_by_portid.values()) ) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress1['port']['id'], egress1['port']['id']) flow3 = self.build_ingress_egress( pc['port_chain']['id'], ingress2['port']['id'], egress2['port']['id']) flow4 = self.build_ingress_egress( pc['port_chain']['id'], ingress3['port']['id'], egress3['port']['id']) self.assertEqual( set(flow_rules.keys()), {flow1, flow2, flow3, flow4}) add_fcs = flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) ip_src = ( src_port['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( flow_rules[flow1].get('next_hops') ) self.assertEqual(next_hops, { ingress1['port']['mac_address']: '10.0.0.1', ingress2['port']['mac_address']: '10.0.0.1', ingress3['port']['mac_address']: '10.0.0.1' }) self.assertEqual( flow_rules[flow1]['node_type'], 'src_node') add_fcs = flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': 
{}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, {}) self.assertEqual( flow_rules[flow2]['node_type'], 'sf_node') add_fcs = flow_rules[flow3]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( flow_rules[flow3].get('next_hops')) self.assertEqual( next_hops, {}) self.assertEqual( flow_rules[flow3]['node_type'], 'sf_node') add_fcs = flow_rules[flow4]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( flow_rules[flow4].get('next_hops')) self.assertEqual( next_hops, {}) self.assertEqual( flow_rules[flow4]['node_type'], 'sf_node') def test_agent_init_multi_flow_classifiers_port_pairs(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port1, self.port( name='port3', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port2, self.port( name='ingress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress, self.port( name='egress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), 
**{portbindings.HOST_ID: 'test'} ) as egress: self.host_endpoint_mapping = { 'test': '10.0.0.1' } with self.flow_classifier(flow_classifier={ 'logical_source_port': src_port1['port']['id'] }) as fc1, self.flow_classifier(flow_classifier={ 'logical_source_port': src_port2['port']['id'] }) as fc2: with self.port_pair(port_pair={ 'ingress': ingress['port']['id'], 'egress': egress['port']['id'] }) as pp: pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp['port_pair'] ) self.driver.create_port_pair(pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp['port_pair']['id']] }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [ fc1['flow_classifier']['id'], fc2['flow_classifier']['id'] ] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() flow_rules = [] flow_rules_by_portid = {} for host, portid in [( src_port1['port']['binding:host_id'], src_port1['port']['id'] ), ( src_port2['port']['binding:host_id'], src_port2['port']['id'] ), ( ingress['port']['binding:host_id'], ingress['port']['id'] ), ( egress['port']['binding:host_id'], egress['port']['id'] )]: flow_rules_by_portid[ portid ] = self.driver.get_flowrules_by_host_portid( self.ctx, host=host, port_id=portid ) flow_rules = self.map_flow_rules( flow_rules, *(flow_rules_by_portid.values()) ) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port1['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port2['port']['id']) flow3 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], egress['port']['id']) self.assertEqual( set(flow_rules.keys()), {flow1, flow2, flow3}) add_fcs = flow_rules[flow1]['add_fcs'] 
self.assertEqual(len(add_fcs), 1) ip_src1 = ( src_port1['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src1, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, {ingress['port']['mac_address']: '10.0.0.1'} ) self.assertEqual( flow_rules[flow1]['node_type'], 'src_node') add_fcs = flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 1) ip_src2 = ( src_port2['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src2, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, {ingress['port']['mac_address']: '10.0.0.1'} ) self.assertEqual( flow_rules[flow2]['node_type'], 'src_node') add_fcs = flow_rules[flow3]['add_fcs'] self.assertEqual(len(add_fcs), 2) self._assert_flow_classifiers_match_subsets( add_fcs, [{ 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src1, 'source_port_range_max': None, 'source_port_range_min': None }, { 'destination_ip_prefix': None, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': u'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src2, 'source_port_range_max': None, 'source_port_range_min': None }], 'source_ip_prefix') next_hops = self.next_hops_info( flow_rules[flow3].get('next_hops')) 
self.assertEqual( next_hops, {} ) self.assertEqual( flow_rules[flow3]['node_type'], 'sf_node') def _test_agent_init_service_graphs_end( self, lsport, pc1port, pc2port1, pc2port2, pc3port, pc4port, pc1fc, pc2fc, pc3fc, pc4fc, pc1pp, pc2pp1, pc2pp2, pc3pp, pc4pp, correlation): pc1pp_context = sfc_ctx.PortPairContext(self.sfc_plugin, self.ctx, pc1pp['port_pair']) pc2pp1_context = sfc_ctx.PortPairContext(self.sfc_plugin, self.ctx, pc2pp1['port_pair']) pc2pp2_context = sfc_ctx.PortPairContext(self.sfc_plugin, self.ctx, pc2pp2['port_pair']) pc3pp_context = sfc_ctx.PortPairContext(self.sfc_plugin, self.ctx, pc3pp['port_pair']) pc4pp_context = sfc_ctx.PortPairContext(self.sfc_plugin, self.ctx, pc4pp['port_pair']) self.driver.create_port_pair(pc1pp_context) self.driver.create_port_pair(pc2pp1_context) self.driver.create_port_pair(pc2pp2_context) self.driver.create_port_pair(pc3pp_context) self.driver.create_port_pair(pc4pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pc1pp['port_pair']['id']]} ) as pc1pg, self.port_pair_group(port_pair_group={ 'port_pairs': [pc2pp1['port_pair']['id']]} ) as pc2pg1, self.port_pair_group(port_pair_group={ 'port_pairs': [pc2pp2['port_pair']['id']]} ) as pc2pg2, self.port_pair_group(port_pair_group={ 'port_pairs': [pc3pp['port_pair']['id']]} ) as pc3pg, self.port_pair_group(port_pair_group={ 'port_pairs': [pc4pp['port_pair']['id']]} ) as pc4pg: pc1pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pc1pg['port_pair_group'] ) pc2pg1_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pc2pg1['port_pair_group'] ) pc2pg2_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pc2pg2['port_pair_group'] ) pc3pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pc3pg['port_pair_group'] ) pc4pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pc4pg['port_pair_group'] ) self.driver.create_port_pair_group(pc1pg_context) 
self.driver.create_port_pair_group(pc2pg1_context) self.driver.create_port_pair_group(pc2pg2_context) self.driver.create_port_pair_group(pc3pg_context) self.driver.create_port_pair_group(pc4pg_context) with self.port_chain(port_chain={ 'chain_parameters': { 'correlation': correlation}, 'port_pair_groups': [ pc1pg['port_pair_group']['id']], 'flow_classifiers': [ pc1fc['flow_classifier']['id']]} ) as pc1, self.port_chain(port_chain={ 'chain_parameters': { 'correlation': correlation}, 'port_pair_groups': [ # different amount of PPGs for pc2 just to complicate pc2pg1['port_pair_group']['id'], pc2pg2['port_pair_group']['id']], 'flow_classifiers': [ pc2fc['flow_classifier']['id']]} ) as pc2, self.port_chain(port_chain={ 'chain_parameters': { 'correlation': correlation}, 'port_pair_groups': [ pc3pg['port_pair_group']['id']], 'flow_classifiers': [ pc3fc['flow_classifier']['id']]} ) as pc3, self.port_chain(port_chain={ 'chain_parameters': { 'correlation': correlation}, 'port_pair_groups': [ pc4pg['port_pair_group']['id']], 'flow_classifiers': [ pc4fc['flow_classifier']['id']]} ) as pc4: pc1_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc1['port_chain'] ) pc2_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc2['port_chain'] ) pc3_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc3['port_chain'] ) pc4_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc4['port_chain'] ) self.driver.create_port_chain(pc1_context) self.driver.create_port_chain(pc2_context) self.driver.create_port_chain(pc3_context) self.driver.create_port_chain(pc4_context) with self.service_graph(service_graph={ 'name': 'graph', 'port_chains': { pc1['port_chain']['id']: [pc2['port_chain']['id'], pc3['port_chain']['id']], pc2['port_chain']['id']: [pc4['port_chain']['id']], pc3['port_chain']['id']: [pc4['port_chain']['id']]}} ) as g: all_ports = [lsport, pc1port, pc2port1, pc2port2, pc3port, pc4port] g_context = sfc_ctx.ServiceGraphContext( 
self.sfc_plugin, self.ctx, g['service_graph'] ) self.driver.create_service_graph_postcommit( g_context) self.wait() flow_rules = [] flow_rules_by_portid = {} for host, portid in [( p['port']['binding:host_id'], p['port']['id'], ) for p in all_ports]: flow_rules_by_portid[ portid ] = self.driver.get_flowrules_by_host_portid( self.ctx, host=host, port_id=portid ) flow_rules = self.map_flow_rules( flow_rules, *(flow_rules_by_portid.values()) ) # verify fr count: 2 in pc1, 3 in pc2, 2 in pc3, 2 in pc4: self.assertEqual(len(flow_rules), 9) pc1_flow_rules = [key for key in flow_rules if key.split( ':')[0] == pc1['port_chain']['id'][:8]] pc2_flow_rules = [key for key in flow_rules if key.split( ':')[0] == pc2['port_chain']['id'][:8]] pc3_flow_rules = [key for key in flow_rules if key.split( ':')[0] == pc3['port_chain']['id'][:8]] pc4_flow_rules = [key for key in flow_rules if key.split( ':')[0] == pc4['port_chain']['id'][:8]] self.assertEqual(len(pc1_flow_rules), 2) self.assertEqual(len(pc2_flow_rules), 3) self.assertEqual(len(pc3_flow_rules), 2) self.assertEqual(len(pc4_flow_rules), 2) # verify pc1's branching flow rule (last sf_node): key = self.build_ingress_egress_from_pp( pc1['port_chain']['id'], pc1pp['port_pair']) self.assertEqual(flow_rules[key]['nsp'], 1) self.assertEqual(flow_rules[key]['nsi'], 254) self.assertEqual(flow_rules[key]['node_type'], 'sf_node') self.assertIsNone(flow_rules[key]['next_group_id']) self.assertIsNone(flow_rules[key]['next_hop']) self.assertFalse('branch_info' in flow_rules[key]) self.assertTrue(flow_rules[key]['branch_point']) # verify pc2's matching flow rule (src_node): key = self.build_ingress_egress( pc2['port_chain']['id'], None, lsport['port']['id']) self.assertEqual(flow_rules[key]['nsp'], 2) self.assertEqual(flow_rules[key]['nsi'], 255) self.assertEqual(flow_rules[key]['node_type'], 'src_node') self.assertIsNotNone(flow_rules[key]['next_group_id']) self.assertIsNotNone(flow_rules[key]['next_hop']) self.assertTrue('branch_info' in 
                                flow_rules[key])
                    branch_matches = flow_rules[key]['branch_info']['matches']
                    self.assertEqual(len(branch_matches), 1)
                    # pc2 branches off pc1's tail (nsp 1, nsi 254).
                    self.assertTrue((1, 254,) in branch_matches)
                    self.assertFalse('branch_point' in flow_rules[key])
                    # verify pc2's branching flow rule (last sf_node):
                    key = self.build_ingress_egress_from_pp(
                        pc2['port_chain']['id'], pc2pp2['port_pair'])
                    self.assertEqual(flow_rules[key]['nsp'], 2)
                    # pc2 has two PPGs, so its last hop sits at nsi 253.
                    self.assertEqual(flow_rules[key]['nsi'], 253)
                    self.assertEqual(flow_rules[key]['node_type'], 'sf_node')
                    self.assertIsNone(flow_rules[key]['next_group_id'])
                    self.assertIsNone(flow_rules[key]['next_hop'])
                    self.assertFalse('branch_info' in flow_rules[key])
                    self.assertTrue(flow_rules[key]['branch_point'])
                    # verify pc3's matching flow rule (src_node):
                    key = self.build_ingress_egress(
                        pc3['port_chain']['id'], None, lsport['port']['id'])
                    self.assertEqual(flow_rules[key]['nsp'], 3)
                    self.assertEqual(flow_rules[key]['nsi'], 255)
                    self.assertEqual(flow_rules[key]['node_type'], 'src_node')
                    self.assertIsNotNone(flow_rules[key]['next_group_id'])
                    self.assertIsNotNone(flow_rules[key]['next_hop'])
                    self.assertTrue('branch_info' in flow_rules[key])
                    # pc3 also branches off pc1's tail (nsp 1, nsi 254).
                    branch_matches = flow_rules[key]['branch_info']['matches']
                    self.assertEqual(len(branch_matches), 1)
                    self.assertTrue((1, 254,) in branch_matches)
                    self.assertFalse('branch_point' in flow_rules[key])
                    # verify pc3's branching flow rule (last sf_node):
                    key = self.build_ingress_egress_from_pp(
                        pc3['port_chain']['id'], pc3pp['port_pair'])
                    self.assertEqual(flow_rules[key]['nsp'], 3)
                    self.assertEqual(flow_rules[key]['nsi'], 254)
                    self.assertEqual(flow_rules[key]['node_type'], 'sf_node')
                    self.assertIsNone(flow_rules[key]['next_group_id'])
                    self.assertIsNone(flow_rules[key]['next_hop'])
                    self.assertFalse('branch_info' in flow_rules[key])
                    self.assertTrue(flow_rules[key]['branch_point'])
                    # verify pc4's matching flow rule (src_node):
                    # (the old comment said "last sf_node", but the
                    # assertions below check nsi 255 / node_type 'src_node')
                    key = self.build_ingress_egress(
                        pc4['port_chain']['id'], None, lsport['port']['id'])
                    self.assertEqual(flow_rules[key]['nsp'], 4)
self.assertEqual(flow_rules[key]['nsi'], 255) self.assertEqual(flow_rules[key]['node_type'], 'src_node') self.assertIsNotNone(flow_rules[key]['next_group_id']) self.assertIsNotNone(flow_rules[key]['next_hop']) self.assertTrue('branch_info' in flow_rules[key]) branch_matches = flow_rules[key]['branch_info']['matches'] self.assertEqual(len(branch_matches), 2) self.assertTrue((2, 253,) in branch_matches) self.assertTrue((3, 254,) in branch_matches) self.assertFalse('branch_point' in flow_rules[key]) # verify that all other flow rules are normal: key = self.build_ingress_egress( pc1['port_chain']['id'], None, lsport['port']['id']) self.assertFalse('branch_info' in flow_rules[key]) self.assertFalse('branch_point' in flow_rules[key]) key = self.build_ingress_egress_from_pp( pc2['port_chain']['id'], pc2pp1['port_pair']) self.assertFalse('branch_info' in flow_rules[key]) self.assertFalse('branch_point' in flow_rules[key]) key = self.build_ingress_egress_from_pp( pc4['port_chain']['id'], pc4pp['port_pair']) self.assertFalse('branch_info' in flow_rules[key]) self.assertFalse('branch_point' in flow_rules[key]) # this test will create a graph with both normal/forking branches # and joining branches, using 4 port chains in total, and will verify # that the driver is able to provide the newly-started agent with # the correct flow rules so that the latter can restore the flows. 
def _test_agent_init_service_graphs(self, correlation): with self.port( name='lsport', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as lsport, self.port( name='pc1port', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as pc1port, self.port( name='pc2port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as pc2port1, self.port( name='pc2port2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as pc2port2, self.port( name='pc3port', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as pc3port, self.port( name='pc4port', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as pc4port: self.host_endpoint_mapping = {'test': '10.0.0.1'} with self.flow_classifier(flow_classifier={ 'logical_source_port': lsport['port']['id'], 'destination_ip_prefix': '192.0.2.1/32'} ) as pc1fc, self.flow_classifier(flow_classifier={ 'logical_source_port': lsport['port']['id'], 'destination_ip_prefix': '192.0.2.2/32'} ) as pc2fc, self.flow_classifier(flow_classifier={ 'logical_source_port': lsport['port']['id'], 'destination_ip_prefix': '192.0.2.3/32'} ) as pc3fc, self.flow_classifier(flow_classifier={ 'logical_source_port': lsport['port']['id'], 'destination_ip_prefix': '192.0.2.4/32'} ) as pc4fc, self.port_pair(port_pair={ 'service_function_parameters': {'correlation': correlation}, 'ingress': pc1port['port']['id'], 'egress': pc1port['port']['id']} ) as pc1pp, self.port_pair(port_pair={ 'service_function_parameters': {'correlation': correlation}, 'ingress': pc2port1['port']['id'], 'egress': pc2port1['port']['id']} ) as pc2pp1, self.port_pair(port_pair={ 'service_function_parameters': {'correlation': 
                    correlation},
                'ingress': pc2port2['port']['id'],
                'egress': pc2port2['port']['id']}
            ) as pc2pp2, self.port_pair(port_pair={
                'service_function_parameters': {'correlation': correlation},
                'ingress': pc3port['port']['id'],
                'egress': pc3port['port']['id']}
            ) as pc3pp, self.port_pair(port_pair={
                'service_function_parameters': {'correlation': correlation},
                'ingress': pc4port['port']['id'],
                'egress': pc4port['port']['id']}
            ) as pc4pp:
                # main reason for splitting this method in 2 is having
                # more than 20 contexts; the PPG/chain/graph setup and all
                # flow-rule assertions live in the helper below.
                self._test_agent_init_service_graphs_end(
                    lsport, pc1port, pc2port1, pc2port2, pc3port, pc4port,
                    pc1fc, pc2fc, pc3fc, pc4fc,
                    pc1pp, pc2pp1, pc2pp2, pc3pp, pc4pp, correlation)

    def test_agent_init_service_graphs_mpls(self):
        # Service-graph flow restoration with MPLS correlation.
        self._test_agent_init_service_graphs('mpls')

    def test_agent_init_service_graphs_nsh(self):
        # Service-graph flow restoration with NSH correlation.
        self._test_agent_init_service_graphs('nsh')

    def test_create_port_chain_cross_subnet_ppg(self):
        # Port pair groups spread over two different subnets; the chain is
        # created across them (success is asserted at the end of the test).
        with self.subnet(
            gateway_ip='10.0.0.10',
            cidr='10.0.0.0/24'
        ) as subnet1, self.subnet(
            gateway_ip='10.0.1.10',
            cidr='10.0.1.0/24'
        ) as subnet2:
            # pair 1 lives on subnet1, pair 2 on subnet2
            with self.port(
                name='port1',
                device_owner='compute',
                device_id='test',
                subnet=subnet1,
                arg_list=(
                    portbindings.HOST_ID,
                ),
                **{portbindings.HOST_ID: 'test'}
            ) as src_port, self.port(
                name='ingress1',
                device_owner='compute',
                device_id='test',
                subnet=subnet1,
                arg_list=(
                    portbindings.HOST_ID,
                ),
                **{portbindings.HOST_ID: 'test'}
            ) as ingress1, self.port(
                name='egress1',
                device_owner='compute',
                device_id='test',
                subnet=subnet1,
                arg_list=(
                    portbindings.HOST_ID,
                ),
                **{portbindings.HOST_ID: 'test'}
            ) as egress1, self.port(
                name='ingress2',
                device_owner='compute',
                device_id='test',
                subnet=subnet2,
                arg_list=(
                    portbindings.HOST_ID,
                ),
                **{portbindings.HOST_ID: 'test'}
            ) as ingress2, self.port(
                name='egress2',
                device_owner='compute',
                device_id='test',
                subnet=subnet2,
                arg_list=(
                    portbindings.HOST_ID,
                ),
                **{portbindings.HOST_ID: 'test'}
            ) as egress2:
                self.host_endpoint_mapping = {
                    'test': '10.0.0.1'
                }
                with
self.flow_classifier(flow_classifier={ 'logical_source_port': src_port['port']['id'] }) as fc: with self.port_pair(port_pair={ 'ingress': ingress1['port']['id'], 'egress': egress1['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': ingress2['port']['id'], 'egress': egress2['port']['id'] }) as pp2: pp1_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp1['port_pair'] ) self.driver.create_port_pair(pp1_context) pp2_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp2['port_pair'] ) self.driver.create_port_pair(pp2_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp1['port_pair']['id']] }) as pg1, self.port_pair_group(port_pair_group={ 'port_pairs': [pp2['port_pair']['id']] }) as pg2: pg1_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg1['port_pair_group'] ) self.driver.create_port_pair_group(pg1_context) pg2_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg2['port_pair_group'] ) self.driver.create_port_pair_group(pg2_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id']], 'flow_classifiers': [fc['flow_classifier']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) result = self.driver.create_port_chain( pc_context) self.assertIsNone(result) def test_create_port_chain_cross_subnet_source(self): with self.subnet( gateway_ip='10.0.0.10', cidr='10.0.0.0/24' )as subnet1, self.subnet( gateway_ip='10.0.1.10', cidr='10.0.1.0/24' )as subnet2: with self.port( name='port1', device_owner='compute', device_id='test', subnet=subnet1, arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress1', device_owner='compute', device_id='test', subnet=subnet2, arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress1, self.port( name='egress1', device_owner='compute', device_id='test', 
subnet=subnet2, arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress1, self.port( name='ingress2', device_owner='compute', device_id='test', subnet=subnet2, arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress2, self.port( name='egress2', device_owner='compute', device_id='test', subnet=subnet2, arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress2: self.host_endpoint_mapping = { 'test': '10.0.0.1' } with self.flow_classifier(flow_classifier={ 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'logical_source_port': src_port['port']['id'] }) as fc: with self.port_pair(port_pair={ 'ingress': ingress1['port']['id'], 'egress': egress1['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': ingress2['port']['id'], 'egress': egress2['port']['id'] }) as pp2: pp1_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp1['port_pair'] ) self.driver.create_port_pair(pp1_context) pp2_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp2['port_pair'] ) self.driver.create_port_pair(pp2_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp1['port_pair']['id']] }) as pg1, self.port_pair_group(port_pair_group={ 'port_pairs': [pp2['port_pair']['id']] }) as pg2: pg1_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg1['port_pair_group'] ) self.driver.create_port_pair_group(pg1_context) pg2_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg2['port_pair_group'] ) self.driver.create_port_pair_group(pg2_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'] ], 'flow_classifiers': [fc['flow_classifier']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) result = self.driver.create_port_chain( pc_context) self.assertIsNone(result) def test_create_port_chain_with_symmetric(self): 
        # Symmetric chain with an empty port pair group and no flow
        # classifiers: creation succeeds and no flow rules reach the agent.
        with self.port_pair_group(port_pair_group={
            'name': 'test1',
        }) as pg:
            pg_context = sfc_ctx.PortPairGroupContext(
                self.sfc_plugin, self.ctx, pg['port_pair_group']
            )
            self.driver.create_port_pair_group(pg_context)
            with self.port_chain(port_chain={
                'name': 'test1',
                'port_pair_groups': [pg['port_pair_group']['id']],
                'chain_parameters': {'symmetric': True}
            }) as pc:
                pc_context = sfc_ctx.PortChainContext(
                    self.sfc_plugin, self.ctx, pc['port_chain']
                )
                self.driver.create_port_chain(pc_context)
                self.wait()
                # nothing to program: no ports, no classifiers
                self.assertEqual(self.rpc_calls['update_flow_rules'], [])

    def test_create_port_chain_precommit_symmetric_no_logical_dst_port(self):
        # A symmetric chain whose flow classifier lacks a
        # logical_destination_port must be rejected at precommit time
        # with SfcBadRequest.
        with self.port(
            name='src',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as src_port:
            self.host_endpoint_mapping = {
                'test': '10.0.0.1',
            }
            # classifier deliberately has only a logical_source_port
            with self.flow_classifier(flow_classifier={
                'source_port_range_min': 100,
                'source_port_range_max': 200,
                'destination_port_range_min': 300,
                'destination_port_range_max': 400,
                'ethertype': 'IPv4',
                'source_ip_prefix': '10.100.0.0/16',
                'destination_ip_prefix': '10.200.0.0/16',
                'l7_parameters': {},
                'protocol': 'tcp',
                'logical_source_port': src_port['port']['id']
            }) as fc:
                with self.port_pair_group(port_pair_group={
                    'port_pairs': []
                }) as pg:
                    pg_context = sfc_ctx.PortPairGroupContext(
                        self.sfc_plugin, self.ctx, pg['port_pair_group']
                    )
                    self.driver.create_port_pair_group(pg_context)
                    with self.port_chain(port_chain={
                        'name': 'test1',
                        'port_pair_groups': [pg['port_pair_group']['id']],
                        'flow_classifiers': [fc['flow_classifier']['id']],
                        'chain_parameters': {'symmetric': True}
                    }) as pc:
                        pc_context = sfc_ctx.PortChainContext(
                            self.sfc_plugin, self.ctx, pc['port_chain']
                        )
                        self.assertRaises(
                            sfc_exc.SfcBadRequest,
                            self.driver.create_port_chain_precommit,
                            pc_context
                        )

    def test_create_port_chain_with_flow_classifiers_with_symmetric(self):
        # Symmetric chain whose classifier carries both logical source and
        # destination ports: a src_node flow rule is produced per direction
        # (asserted further down in this test).
        with self.port(
            name='src',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
**{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='dst', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as dst_port: self.host_endpoint_mapping = { 'test': '10.0.0.1', } with self.flow_classifier(flow_classifier={ 'source_port_range_min': 100, 'source_port_range_max': 200, 'destination_port_range_min': 300, 'destination_port_range_max': 400, 'ethertype': 'IPv4', 'source_ip_prefix': '10.100.0.0/16', 'destination_ip_prefix': '10.200.0.0/16', 'l7_parameters': {}, 'protocol': 'tcp', 'logical_source_port': src_port['port']['id'], 'logical_destination_port': dst_port['port']['id'] }) as fc: with self.port_pair_group(port_pair_group={ 'port_pairs': [] }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [fc['flow_classifier']['id']], 'chain_parameters': {'symmetric': True} }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, fc['flow_classifier']['logical_source_port']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], None, fc['flow_classifier']['logical_destination_port']) self.assertEqual( set(update_flow_rules.keys()), {flow1, flow2}) self.assertEqual( len(update_flow_rules[flow1]['add_fcs']), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': u'tcp', 'source_ip_prefix': u'10.100.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, 
update_flow_rules[flow1]['add_fcs'][0]) self.assertEqual( update_flow_rules[flow1]['del_fcs'], []) self.assertEqual( update_flow_rules[flow1]['node_type'], 'src_node') self.assertIsNone( update_flow_rules[flow1].get('next_hops') ) self.assertIsNotNone( update_flow_rules[flow1]['next_group_id'] ) self.assertEqual( len(update_flow_rules[flow2]['add_fcs']), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': u'tcp', 'source_ip_prefix': u'10.100.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, update_flow_rules[flow2]['add_fcs'][0]) self.assertEqual( update_flow_rules[flow2]['del_fcs'], []) self.assertEqual( update_flow_rules[flow2]['node_type'], 'src_node') self.assertIsNone( update_flow_rules[flow2].get('next_hops') ) self.assertIsNotNone( update_flow_rules[flow2]['next_group_id'] ) def test_create_port_chain_with_fcs_port_pairs_with_symmetric(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress, self.port( name='egress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress, self.port( name='port2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as dst_port: self.host_endpoint_mapping = { 'test': '10.0.0.1', } with self.flow_classifier(flow_classifier={ 'source_port_range_min': 100, 'source_port_range_max': 200, 'destination_port_range_min': 300, 'destination_port_range_max': 400, 'ethertype': 'IPv4', 'source_ip_prefix': '10.100.0.0/16', 'destination_ip_prefix': '10.200.0.0/16', 'l7_parameters': {}, 
'protocol': 'tcp', 'logical_source_port': src_port['port']['id'], 'logical_destination_port': dst_port['port']['id'] }) as fc: with self.port_pair(port_pair={ 'ingress': ingress['port']['id'], 'egress': egress['port']['id'] }) as pp: pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp['port_pair'] ) self.driver.create_port_pair(pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp['port_pair']['id']] }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [fc['flow_classifier']['id']], 'chain_parameters': {'symmetric': True} }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], egress['port']['id']) flow3 = self.build_ingress_egress( pc['port_chain']['id'], None, dst_port['port']['id']) flow4 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], egress['port']['id']) self.assertEqual( set(update_flow_rules.keys()), {flow1, flow2, flow3, flow4}) add_fcs = update_flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.100.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, 
{ingress['port']['mac_address']: '10.0.0.1'}) self.assertIsNotNone( update_flow_rules[flow1]['next_group_id']) self.assertEqual( update_flow_rules[flow1]['node_type'], 'src_node') add_fcs = update_flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 2) self._assert_flow_classifiers_match_subsets( add_fcs, [{ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.100.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }] * 2) next_hops = self.next_hops_info( update_flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, {}) self.assertIsNone( update_flow_rules[flow2]['next_group_id']) self.assertEqual( update_flow_rules[flow2]['node_type'], 'sf_node') add_fcs = update_flow_rules[flow3]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.100.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow3].get('next_hops')) self.assertEqual( next_hops, {ingress['port']['mac_address']: '10.0.0.1'}) self.assertIsNotNone( update_flow_rules[flow3]['next_group_id']) self.assertEqual( update_flow_rules[flow3]['node_type'], 'src_node') add_fcs = update_flow_rules[flow4]['add_fcs'] self.assertEqual(len(add_fcs), 2) self._assert_flow_classifiers_match_subsets( add_fcs, [{ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.100.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }] * 2) next_hops = self.next_hops_info( 
update_flow_rules[flow4].get('next_hops')) self.assertEqual( next_hops, {}) self.assertIsNone( update_flow_rules[flow4]['next_group_id']) self.assertEqual( update_flow_rules[flow4]['node_type'], 'sf_node') def test_create_port_chain_with_multi_fcs_port_pairs_with_symmetric(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port1, self.port( name='port3', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port2, self.port( name='ingress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress, self.port( name='egress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress, self.port( name='port2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as dst_port1, self.port( name='port4', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as dst_port2: self.host_endpoint_mapping = { 'test': '10.0.0.1' } with self.flow_classifier(flow_classifier={ 'logical_source_port': src_port1['port']['id'], 'logical_destination_port': dst_port1['port']['id'] }) as fc1, self.flow_classifier(flow_classifier={ 'logical_source_port': src_port2['port']['id'], 'logical_destination_port': dst_port2['port']['id'] }) as fc2: with self.port_pair(port_pair={ 'ingress': ingress['port']['id'], 'egress': egress['port']['id'] }) as pp: pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp['port_pair'] ) self.driver.create_port_pair(pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp['port_pair']['id']] }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) 
self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [ fc1['flow_classifier']['id'], fc2['flow_classifier']['id'] ], 'chain_parameters': {'symmetric': True} }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port1['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port2['port']['id']) flow3 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], egress['port']['id']) flow4 = self.build_ingress_egress( pc['port_chain']['id'], None, dst_port1['port']['id']) flow5 = self.build_ingress_egress( pc['port_chain']['id'], None, dst_port2['port']['id']) flow6 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], egress['port']['id']) self.assertEqual( set(update_flow_rules.keys()), {flow1, flow2, flow3, flow4, flow5, flow6}) add_fcs = update_flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) ip_src1 = ( src_port1['port']['fixed_ips'][0]['ip_address'] ) ip_dst1 = ( dst_port1['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': ip_dst1, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src1, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, { ingress['port']['mac_address']: '10.0.0.1' } ) self.assertEqual( update_flow_rules[flow1]['node_type'], 'src_node') add_fcs = update_flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 1) ip_src2 = ( 
src_port2['port']['fixed_ips'][0]['ip_address'] ) ip_dst2 = ( dst_port2['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': ip_dst2, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src2, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, { ingress['port']['mac_address']: '10.0.0.1' } ) self.assertEqual( update_flow_rules[flow2]['node_type'], 'src_node') add_fcs = update_flow_rules[flow3]['add_fcs'] self.assertEqual(len(add_fcs), 4) self.assertDictContainsSubset({ 'destination_ip_prefix': ip_dst1, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src1, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) self.assertDictContainsSubset({ 'destination_ip_prefix': ip_dst2, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src2, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[1]) next_hops = self.next_hops_info( update_flow_rules[flow3].get('next_hops')) self.assertEqual( next_hops, { } ) self.assertEqual( update_flow_rules[flow3]['node_type'], 'sf_node') add_fcs = update_flow_rules[flow4]['add_fcs'] self.assertEqual(len(add_fcs), 1) ip_src1 = ( src_port1['port']['fixed_ips'][0]['ip_address'] ) ip_dst1 = ( dst_port1['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': ip_dst1, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src1, 'source_port_range_max': None, 'source_port_range_min': None }, 
add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow4].get('next_hops')) self.assertEqual( next_hops, { ingress['port']['mac_address']: '10.0.0.1' } ) self.assertEqual( update_flow_rules[flow4]['node_type'], 'src_node') add_fcs = update_flow_rules[flow5]['add_fcs'] self.assertEqual(len(add_fcs), 1) ip_src2 = ( src_port2['port']['fixed_ips'][0]['ip_address'] ) ip_dst2 = ( dst_port2['port']['fixed_ips'][0]['ip_address'] ) self.assertDictContainsSubset({ 'destination_ip_prefix': ip_dst2, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src2, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow5].get('next_hops')) self.assertEqual( next_hops, { ingress['port']['mac_address']: '10.0.0.1' } ) self.assertEqual( update_flow_rules[flow5]['node_type'], 'src_node') add_fcs = update_flow_rules[flow6]['add_fcs'] self.assertEqual(len(add_fcs), 4) self.assertDictContainsSubset({ 'destination_ip_prefix': ip_dst1, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src1, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[0]) self.assertDictContainsSubset({ 'destination_ip_prefix': ip_dst2, 'destination_port_range_max': None, 'destination_port_range_min': None, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': None, 'source_ip_prefix': ip_src2, 'source_port_range_max': None, 'source_port_range_min': None }, add_fcs[1]) next_hops = self.next_hops_info( update_flow_rules[flow6].get('next_hops')) self.assertEqual( next_hops, { } ) self.assertEqual( update_flow_rules[flow6]['node_type'], 'sf_node') def test_create_port_chain_fcs_port_pairs_ppg_n_tuple_symmetric(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( 
portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress, self.port( name='egress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress, self.port( name='port2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as dst_port: self.host_endpoint_mapping = {'test': '10.0.0.1'} with self.flow_classifier(flow_classifier={ 'source_port_range_min': 100, 'source_port_range_max': 200, 'destination_port_range_min': 300, 'destination_port_range_max': 400, 'ethertype': 'IPv4', 'source_ip_prefix': '10.100.0.0/16', 'destination_ip_prefix': '10.200.0.0/16', 'l7_parameters': {}, 'protocol': 'tcp', 'logical_source_port': src_port['port']['id'], 'logical_destination_port': dst_port['port']['id'] }) as fc: with self.port_pair(port_pair={ 'ingress': ingress['port']['id'], 'egress': egress['port']['id'] }) as pp: pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp['port_pair'] ) self.driver.create_port_pair(pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp['port_pair']['id']], 'port_pair_group_parameters': { 'ppg_n_tuple_mapping': { 'ingress_n_tuple': { 'source_ip_prefix': '10.100.0.0/16'}, 'egress_n_tuple': { 'source_ip_prefix': '10.300.0.0/16'} } } }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [fc['flow_classifier']['id']], 'chain_parameters': {'symmetric': True} }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() update_flow_rules 
= self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], egress['port']['id']) flow3 = self.build_ingress_egress( pc['port_chain']['id'], None, dst_port['port']['id']) flow4 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], egress['port']['id']) self.assertEqual( set(update_flow_rules.keys()), {flow1, flow2, flow3, flow4}) add_fcs = update_flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.100.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow1].get('next_hops')) self.assertEqual( next_hops, {ingress['port']['mac_address']: '10.0.0.1'}) self.assertIsNotNone( update_flow_rules[flow1]['next_group_id']) self.assertEqual( update_flow_rules[flow1]['node_type'], 'src_node') add_fcs = update_flow_rules[flow2]['add_fcs'] self.assertEqual(len(add_fcs), 2) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.300.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow2].get('next_hops')) self.assertEqual( next_hops, {}) self.assertIsNone( update_flow_rules[flow2]['next_group_id']) self.assertEqual( update_flow_rules[flow2]['node_type'], 'sf_node') add_fcs = update_flow_rules[flow3]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 
'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.300.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow3].get('next_hops')) self.assertEqual( next_hops, {ingress['port']['mac_address']: '10.0.0.1'}) self.assertIsNotNone( update_flow_rules[flow3]['next_group_id']) self.assertEqual( update_flow_rules[flow3]['node_type'], 'src_node') add_fcs = update_flow_rules[flow4]['add_fcs'] self.assertEqual(len(add_fcs), 2) self.assertDictContainsSubset({ 'destination_ip_prefix': '10.200.0.0/16', 'destination_port_range_max': 400, 'destination_port_range_min': 300, 'ethertype': 'IPv4', 'l7_parameters': {}, 'protocol': 'tcp', 'source_ip_prefix': '10.300.0.0/16', 'source_port_range_max': 200, 'source_port_range_min': 100 }, add_fcs[0]) next_hops = self.next_hops_info( update_flow_rules[flow4].get('next_hops')) self.assertEqual( next_hops, {}) self.assertIsNone( update_flow_rules[flow4]['next_group_id']) self.assertEqual( update_flow_rules[flow4]['node_type'], 'sf_node') # this test will create the simplest possible graph, from a port chain # with a single pp/ppg, to another port chain with a single pp/ppg, # in the same host and using trivial flow classifiers. 
    def _test_create_service_graph(self, correlation):
        """Create a minimal service graph (pc1 -> pc2) and verify rewiring.

        :param correlation: SFC encapsulation protocol for port pairs and
            chains ('mpls' or 'nsh').

        Only two flow rules are expected to change: the end of pc1 (its
        PPG becomes a branch point) and the start of pc2 (its src_node is
        re-keyed on the branch's NSP/NSI match instead of its LSP).
        """
        with self.port(
            name='pc1port',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as pc1sourceport, self.port(
            # NOTE(review): the three ports below all reuse name='pc2port';
            # looks like copy-paste, harmless since 'name' is cosmetic here.
            name='pc2port',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as pc2sourceport, self.port(
            name='pc2port',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as pc1port, self.port(
            name='pc2port',
            device_owner='compute',
            device_id='test',
            arg_list=(
                portbindings.HOST_ID,
            ),
            **{portbindings.HOST_ID: 'test'}
        ) as pc2port:
            self.host_endpoint_mapping = {
                'test': '10.0.0.1'
            }
            with self.flow_classifier(flow_classifier={
                'logical_source_port': pc1sourceport['port']['id'],
                'protocol': 'tcp'}
            ) as pc1fc, self.flow_classifier(flow_classifier={
                # when attached to the graph, this LSP gets ignored
                'logical_source_port': pc2sourceport['port']['id'],
                'protocol': 'udp'}
            ) as pc2fc:
                # each chain's pair uses the same port as ingress+egress
                with self.port_pair(port_pair={
                    'service_function_parameters': {
                        'correlation': correlation},
                    'ingress': pc1port['port']['id'],
                    'egress': pc1port['port']['id']}
                ) as pc1pp, self.port_pair(port_pair={
                    'service_function_parameters': {
                        'correlation': correlation},
                    'ingress': pc2port['port']['id'],
                    'egress': pc2port['port']['id']}
                ) as pc2pp:
                    pc1pp_context = sfc_ctx.PortPairContext(
                        self.sfc_plugin, self.ctx,
                        pc1pp['port_pair']
                    )
                    pc2pp_context = sfc_ctx.PortPairContext(
                        self.sfc_plugin, self.ctx,
                        pc2pp['port_pair']
                    )
                    self.driver.create_port_pair(pc1pp_context)
                    self.driver.create_port_pair(pc2pp_context)
                    with self.port_pair_group(port_pair_group={
                        'port_pairs': [pc1pp['port_pair']['id']]}
                    ) as pc1pg, self.port_pair_group(port_pair_group={
                        'port_pairs': [pc2pp['port_pair']['id']]}
                    ) as pc2pg:
                        pc1pg_context = sfc_ctx.PortPairGroupContext(
                            self.sfc_plugin, self.ctx,
                            pc1pg['port_pair_group']
                        )
                        pc2pg_context = sfc_ctx.PortPairGroupContext(
                            self.sfc_plugin, self.ctx,
                            pc2pg['port_pair_group']
                        )
                        self.driver.create_port_pair_group(pc1pg_context)
                        self.driver.create_port_pair_group(pc2pg_context)
                        with self.port_chain(port_chain={
                            'chain_parameters': {
                                'correlation': correlation},
                            'port_pair_groups': [
                                pc1pg['port_pair_group']['id']],
                            'flow_classifiers': [
                                pc1fc['flow_classifier']['id']]}
                        ) as pc1, self.port_chain(port_chain={
                            'chain_parameters': {
                                'correlation': correlation},
                            'port_pair_groups': [
                                pc2pg['port_pair_group']['id']],
                            'flow_classifiers': [
                                pc2fc['flow_classifier']['id']]}
                        ) as pc2:
                            pc1_context = sfc_ctx.PortChainContext(
                                self.sfc_plugin, self.ctx,
                                pc1['port_chain']
                            )
                            pc2_context = sfc_ctx.PortChainContext(
                                self.sfc_plugin, self.ctx,
                                pc2['port_chain']
                            )
                            self.driver.create_port_chain(pc1_context)
                            self.driver.create_port_chain(pc2_context)
                            # original port-chains' flow rules
                            update_flow_rules = self.map_flow_rules(
                                self.rpc_calls['update_flow_rules'])
                            # flow rule for the end of the src chain (PPG)
                            flow1_end = self.build_ingress_egress(
                                pc1['port_chain']['id'],
                                pc1port['port']['id'],
                                pc1port['port']['id'])
                            # flow rule for the start of the dst chain (PPG)
                            flow2_sta = self.build_ingress_egress(
                                pc2['port_chain']['id'],
                                None,
                                pc2fc['flow_classifier']['logical_source_port']
                            )
                            # old_add1 is the original add_fcs of the src PPG
                            old_add1 = update_flow_rules[flow1_end]['add_fcs']
                            # old_add2 is the original add_fcs of the dst PPG
                            old_add2 = update_flow_rules[flow2_sta]['add_fcs']
                            # clear port-chains' flow rules to focus on graph
                            self.init_rpc_calls()
                            with self.service_graph(service_graph={
                                'name': 'test1',
                                'port_chains': {
                                    pc1['port_chain']['id']: [
                                        pc2['port_chain']['id']]}}
                            ) as g:
                                g_context = sfc_ctx.ServiceGraphContext(
                                    self.sfc_plugin, self.ctx,
                                    g['service_graph']
                                )
                                self.driver.create_service_graph_postcommit(
                                    g_context)
                                self.wait()
                                ufr = self.map_flow_rules(
                                    self.rpc_calls['update_flow_rules'])
                                # assert that the common "nodes" of linked
                                # chains have had their flow rules replaced
                                self.assertEqual(set(ufr.keys()),
                                                 set([flow1_end,
                                                      flow2_sta]))
                                self.assertEqual(
                                    ufr[flow1_end]['node_type'], 'sf_node'
                                )
                                self.assertEqual(
                                    ufr[flow2_sta]['node_type'], 'src_node'
                                )
                                self.assertDictContainsSubset({
                                    'branch_point': True
                                }, ufr[flow1_end])
                                # dependent chain must match on dependency
                                # chain and have the expected identifiers
                                # (first chain gets NSP 1, initial NSI 254)
                                self.assertEqual(ufr[flow2_sta][
                                    'branch_info']['matches'][0][0], 1)
                                self.assertEqual(ufr[flow2_sta][
                                    'branch_info']['matches'][0][1], 254)
                                self.assertEqual(ufr[
                                    flow2_sta]['branch_info']['matches'][0][0],
                                    ufr[flow1_end]['nsp'])
                                self.assertEqual(ufr[
                                    flow2_sta]['branch_info']['matches'][0][1],
                                    ufr[flow1_end]['nsi'])
                                # we are creating the graph:
                                self.assertEqual(ufr[
                                    flow2_sta]['branch_info']['on_add'], True)
                                # next_hops should be present in the src_node
                                self.assertTrue('next_hops' in ufr[flow2_sta])
                                add_fcs = ufr[flow2_sta]['add_fcs']
                                del_fcs = ufr[flow2_sta]['del_fcs']
                                # dst PPG del_fcs must equal pre-graph add_fcs
                                self.assertEqual(del_fcs, old_add2)
                                self.assertEqual(len(add_fcs), 1)
                                self.assertEqual(len(del_fcs), 1)
                                for add_fc in add_fcs:
                                    # no LSPs for destination chain src_node
                                    self.assertDictContainsSubset({
                                        'logical_source_port': None
                                    }, add_fc)
                                add_fcs = ufr[flow1_end]['add_fcs']
                                del_fcs = ufr[flow1_end]['del_fcs']
                                # src PPG del_fcs must equal pre-graph add_fcs
                                self.assertEqual(del_fcs, old_add1)
                                self.assertEqual(len(add_fcs), 1)
                                self.assertEqual(len(del_fcs), 1)

    def test_create_service_graph_mpls(self):
        # minimal graph with MPLS correlation
        self._test_create_service_graph('mpls')

    def test_create_service_graph_nsh(self):
        # minimal graph with NSH correlation
        self._test_create_service_graph('nsh')

    # post-graph-creation testing of test_create_service_graph_complex()
    def _test_create_service_graph_complex(self, g, sta_nodes, end_nodes,
                                           old_add1, old_add2, nsp, nsi):
        """Verify flow-rule rewiring after creating the complex graph.

        :param g: service graph resource dict.
        :param sta_nodes: start-node flow keys of dependent chains.
        :param end_nodes: end-node (branch point) flow keys of
            dependency chains.
        :param old_add1: pre-graph add_fcs per end node.
        :param old_add2: pre-graph add_fcs per start node.
        :param nsp: NSP per end node; nsi: NSI per end node.
        """
        g_context = sfc_ctx.ServiceGraphContext(
            self.sfc_plugin, self.ctx,
            g['service_graph']
        )
        self.driver.create_service_graph_postcommit(g_context)
        self.wait()
        ufr = self.map_flow_rules(self.rpc_calls['update_flow_rules'])
        # assert that the common "nodes" of linked
        # chains have had their flow rules replaced
        self.assertEqual(set(ufr.keys()), set(sta_nodes + end_nodes))
        for node in sta_nodes:
            # start nodes of dependent chains are src_node
            self.assertEqual(ufr[node]['node_type'], 'src_node')
            add_fcs = ufr[node]['add_fcs']
            del_fcs = ufr[node]['del_fcs']
            # dst PPG del_fcs must equal pre-graph add_fcs
            self.assertEqual(del_fcs, old_add2[node])
            self.assertEqual(len(add_fcs), 1)
            self.assertEqual(len(del_fcs), 1)
            for add_fc in add_fcs:
                # no LSPs for destination chain src_node
                self.assertDictContainsSubset({
                    'logical_source_port': None
                }, add_fc)
            # next_hops should be present in src_node
            self.assertTrue('next_hops' in ufr[node])
            # no LSPs for destination chain src_node
            self.assertDictContainsSubset({
                'logical_source_port': None}, ufr[node]['add_fcs'][0])
            # the graph will be created, so use matches together with add_fcs:
            self.assertEqual(ufr[node]['branch_info']['on_add'], True)
        # end nodes of dependency chains are sf_node
        for node in end_nodes:
            self.assertEqual(ufr[node]['node_type'], 'sf_node')
            self.assertDictContainsSubset({'branch_point': True}, ufr[node])
            add_fcs = ufr[node]['add_fcs']
            del_fcs = ufr[node]['del_fcs']
            # src PPG del_fcs must equal pre-graph add_fcs
            self.assertEqual(del_fcs, old_add1[node])
            self.assertEqual(len(add_fcs), 1)
            self.assertEqual(len(del_fcs), 1)
        # "joining" branches from pc4 and pc5 into pc6:
        ufr[sta_nodes[4]]['branch_info'][
            'matches'] = sorted(ufr[sta_nodes[4]][
                'branch_info']['matches'], key=(
                    lambda m: m[0]))  # sort by nsp
        # assert that each branch matches correctly
        self.assertTrue((nsp[end_nodes[0]], nsi[
            end_nodes[0]],) == ufr[sta_nodes[0]][
                'branch_info']['matches'][0])
        self.assertTrue((nsp[end_nodes[0]], nsi[
            end_nodes[0]],) == ufr[sta_nodes[1]][
                'branch_info']['matches'][0])
        self.assertTrue((nsp[end_nodes[1]], nsi[
            end_nodes[1]],) == ufr[sta_nodes[2]][
                'branch_info']['matches'][0])
        self.assertTrue((nsp[end_nodes[2]], nsi[
            end_nodes[2]],) == ufr[sta_nodes[3]][
                'branch_info']['matches'][0])
        self.assertTrue((nsp[end_nodes[3]], nsi[
            end_nodes[3]],) == ufr[sta_nodes[4]][
                'branch_info']['matches'][0])
        self.assertTrue((nsp[end_nodes[4]], nsi[
            end_nodes[4]],) == ufr[sta_nodes[4]][
                'branch_info']['matches'][1])

    # post-graph-creation testing of test_delete_service_graph_complex()
    def _test_delete_service_graph_complex(self, g, sta_nodes, end_nodes,
                                           old_add1, old_add2, nsp, nsi):
        """Verify flow-rule restoration after deleting the complex graph.

        Parameters mirror _test_create_service_graph_complex; here the
        pre-graph add_fcs must be restored and branch markers cleared.
        """
        g_context = sfc_ctx.ServiceGraphContext(
            self.sfc_plugin, self.ctx,
            g['service_graph']
        )
        self.driver.delete_service_graph_postcommit(g_context)
        self.wait()
        ufr = self.map_flow_rules(self.rpc_calls['update_flow_rules'])
        # assert that the common "nodes" of linked
        # chains have had their flow rules replaced
        self.assertEqual(set(ufr.keys()), set(sta_nodes + end_nodes))
        for node in sta_nodes:
            # start nodes of dependent chains are src_node
            self.assertEqual(ufr[node]['node_type'], 'src_node')
            self.assertEqual(len(ufr[node]['del_fcs']), 1)
            # NOTE(review): duplicated assertion — the line below repeats
            # the del_fcs length check; it was likely meant to check
            # add_fcs. Left unchanged pending confirmation.
            self.assertEqual(len(ufr[node]['del_fcs']), 1)
            # dst PPG add_fcs is the same as pre-graph add_fcs
            self.assertEqual(ufr[node]['add_fcs'], old_add2[node])
            # next_hops should be present in src_node
            self.assertTrue('next_hops' in ufr[node])
            # no LSPs for destination chain src_node (del_fcs)
            self.assertDictContainsSubset({
                'logical_source_port': None}, ufr[node]['del_fcs'][0])
            self.assertDictContainsSubset({
                'logical_source_port': node.split(':')[2]},  # egress port
                ufr[node]['add_fcs'][0])
            # the graph will be deleted, so use matches together with del_fcs:
            self.assertEqual(ufr[node]['branch_info']['on_add'], False)
        # end nodes of dependency chains are sf_node
        for node in end_nodes:
            self.assertEqual(ufr[node]['node_type'], 'sf_node')
            self.assertDictContainsSubset({
                'branch_point': False
            }, ufr[node])
            # src PPG add_fcs is the same as pre-graph add_fcs
            self.assertEqual(ufr[node]['add_fcs'], old_add1[node])
        # "joining" branches from pc4 and pc5 into pc6:
        ufr[sta_nodes[4]]['branch_info']['matches'] = sorted(ufr[sta_nodes[4]][
            'branch_info']['matches'], key=(
                lambda m: m[0]))  # sort by nsp
        # assert that each branch matches correctly
        self.assertTrue((nsp[end_nodes[0]], nsi[
            end_nodes[0]],) == ufr[sta_nodes[0]]['branch_info']['matches'][0])
        self.assertTrue((nsp[end_nodes[0]], nsi[
            end_nodes[0]],) == ufr[sta_nodes[1]]['branch_info']['matches'][0])
        self.assertTrue((nsp[end_nodes[1]], nsi[
            end_nodes[1]],) == ufr[sta_nodes[2]]['branch_info']['matches'][0])
        self.assertTrue((nsp[end_nodes[2]], nsi[
            end_nodes[2]],) == ufr[sta_nodes[3]]['branch_info']['matches'][0])
        self.assertTrue((nsp[end_nodes[3]], nsi[
            end_nodes[3]],) == ufr[sta_nodes[4]]['branch_info']['matches'][0])
        self.assertTrue((nsp[end_nodes[4]], nsi[
            end_nodes[4]],) == ufr[sta_nodes[4]]['branch_info']['matches'][1])

    # this test will create a very complex graph, that initially branches
    # (after pc1), and later joins back into a single service function path
    # (on pc6), in the same host and using trivial flow classifiers.
def _test_service_graph_complex(self, create, correlation): with self.port( name='lsport', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} # even though this will be set as the LSP of every PC, # it will be ignored on non-initial PCs (pc2-pc7), # this results in the FCs being lumped together in the same # "egress" flow rule, but with match_nsp/nsi in there, which # can be used by the OVS agent to decide how to match on traffic ) as lsport, self.port( name='pc1port', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as pc1port, self.port( name='pc2port', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as pc2port, self.port( name='pc3port', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as pc3port, self.port( name='pc4port', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as pc4port, self.port( name='pc5port', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as pc5port, self.port( name='pc6port', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as pc6port: self.host_endpoint_mapping = {'test': '10.0.0.1'} with self.flow_classifier(flow_classifier={ 'logical_source_port': lsport['port']['id'], 'destination_ip_prefix': '192.0.2.1/32'} ) as pc1fc, self.flow_classifier(flow_classifier={ 'logical_source_port': lsport['port']['id'], 'destination_ip_prefix': '192.0.2.2/32'} ) as pc2fc, self.flow_classifier(flow_classifier={ 'logical_source_port': lsport['port']['id'], 'destination_ip_prefix': '192.0.2.3/32'} ) as pc3fc, self.flow_classifier(flow_classifier={ 'logical_source_port': lsport['port']['id'], 'destination_ip_prefix': 
'192.0.2.4/32'} ) as pc4fc, self.flow_classifier(flow_classifier={ 'logical_source_port': lsport['port']['id'], 'destination_ip_prefix': '192.0.2.5/32'} ) as pc5fc, self.flow_classifier(flow_classifier={ 'logical_source_port': lsport['port']['id'], 'destination_ip_prefix': '192.0.2.6/32'} ) as pc6fc, self.port_pair(port_pair={ 'service_function_parameters': {'correlation': correlation}, 'ingress': pc1port['port']['id'], 'egress': pc1port['port']['id']} ) as pc1pp, self.port_pair(port_pair={ 'service_function_parameters': {'correlation': correlation}, 'ingress': pc2port['port']['id'], 'egress': pc2port['port']['id']} ) as pc2pp, self.port_pair(port_pair={ 'service_function_parameters': {'correlation': correlation}, 'ingress': pc3port['port']['id'], 'egress': pc3port['port']['id']} ) as pc3pp, self.port_pair(port_pair={ 'service_function_parameters': {'correlation': correlation}, 'ingress': pc4port['port']['id'], 'egress': pc4port['port']['id']} ) as pc4pp, self.port_pair(port_pair={ 'service_function_parameters': {'correlation': correlation}, 'ingress': pc5port['port']['id'], 'egress': pc5port['port']['id']} ) as pc5pp, self.port_pair(port_pair={ 'service_function_parameters': {'correlation': correlation}, 'ingress': pc6port['port']['id'], 'egress': pc6port['port']['id']} ) as pc6pp: # main reason for splitting this method in 2 is having # more than 20 contexts self._test_service_graph_complex_end( create, pc1fc, pc2fc, pc3fc, pc4fc, pc5fc, pc6fc, pc1pp, pc2pp, pc3pp, pc4pp, pc5pp, pc6pp, correlation) def _test_service_graph_complex_end( self, create, pc1fc, pc2fc, pc3fc, pc4fc, pc5fc, pc6fc, pc1pp, pc2pp, pc3pp, pc4pp, pc5pp, pc6pp, correlation): pc1pp_context = sfc_ctx.PortPairContext(self.sfc_plugin, self.ctx, pc1pp['port_pair']) pc2pp_context = sfc_ctx.PortPairContext(self.sfc_plugin, self.ctx, pc2pp['port_pair']) pc3pp_context = sfc_ctx.PortPairContext(self.sfc_plugin, self.ctx, pc3pp['port_pair']) pc4pp_context = sfc_ctx.PortPairContext(self.sfc_plugin, 
self.ctx, pc4pp['port_pair']) pc5pp_context = sfc_ctx.PortPairContext(self.sfc_plugin, self.ctx, pc5pp['port_pair']) pc6pp_context = sfc_ctx.PortPairContext(self.sfc_plugin, self.ctx, pc6pp['port_pair']) self.driver.create_port_pair(pc1pp_context) self.driver.create_port_pair(pc2pp_context) self.driver.create_port_pair(pc3pp_context) self.driver.create_port_pair(pc4pp_context) self.driver.create_port_pair(pc5pp_context) self.driver.create_port_pair(pc6pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pc1pp['port_pair']['id']]} ) as pc1pg, self.port_pair_group(port_pair_group={ 'port_pairs': [pc2pp['port_pair']['id']]} ) as pc2pg, self.port_pair_group(port_pair_group={ 'port_pairs': [pc3pp['port_pair']['id']]} ) as pc3pg, self.port_pair_group(port_pair_group={ 'port_pairs': [pc4pp['port_pair']['id']]} ) as pc4pg, self.port_pair_group(port_pair_group={ 'port_pairs': [pc5pp['port_pair']['id']]} ) as pc5pg, self.port_pair_group(port_pair_group={ 'port_pairs': [pc6pp['port_pair']['id']]} ) as pc6pg: pc1pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pc1pg['port_pair_group'] ) pc2pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pc2pg['port_pair_group'] ) pc3pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pc3pg['port_pair_group'] ) pc4pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pc4pg['port_pair_group'] ) pc5pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pc5pg['port_pair_group'] ) pc6pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pc6pg['port_pair_group'] ) self.driver.create_port_pair_group(pc1pg_context) self.driver.create_port_pair_group(pc2pg_context) self.driver.create_port_pair_group(pc3pg_context) self.driver.create_port_pair_group(pc4pg_context) self.driver.create_port_pair_group(pc5pg_context) self.driver.create_port_pair_group(pc6pg_context) with self.port_chain(port_chain={ 'chain_parameters': { 
'correlation': correlation}, 'port_pair_groups': [ pc1pg['port_pair_group']['id']], 'flow_classifiers': [ pc1fc['flow_classifier']['id']]} ) as pc1, self.port_chain(port_chain={ 'chain_parameters': { 'correlation': correlation}, 'port_pair_groups': [ pc2pg['port_pair_group']['id']], 'flow_classifiers': [ pc2fc['flow_classifier']['id']]} ) as pc2, self.port_chain(port_chain={ 'chain_parameters': { 'correlation': correlation}, 'port_pair_groups': [ pc3pg['port_pair_group']['id']], 'flow_classifiers': [ pc3fc['flow_classifier']['id']]} ) as pc3, self.port_chain(port_chain={ 'chain_parameters': { 'correlation': correlation}, 'port_pair_groups': [ pc4pg['port_pair_group']['id']], 'flow_classifiers': [ pc4fc['flow_classifier']['id']]} ) as pc4, self.port_chain(port_chain={ 'chain_parameters': { 'correlation': correlation}, 'port_pair_groups': [ pc5pg['port_pair_group']['id']], 'flow_classifiers': [ pc5fc['flow_classifier']['id']]} ) as pc5, self.port_chain(port_chain={ 'chain_parameters': { 'correlation': correlation}, 'port_pair_groups': [ pc6pg['port_pair_group']['id']], 'flow_classifiers': [ pc6fc['flow_classifier']['id']]} ) as pc6: pc1_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc1['port_chain'] ) pc2_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc2['port_chain'] ) pc3_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc3['port_chain'] ) pc4_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc4['port_chain'] ) pc5_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc5['port_chain'] ) pc6_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc6['port_chain'] ) self.driver.create_port_chain(pc1_context) self.driver.create_port_chain(pc2_context) self.driver.create_port_chain(pc3_context) self.driver.create_port_chain(pc4_context) self.driver.create_port_chain(pc5_context) self.driver.create_port_chain(pc6_context) # original port-chains' update_flow_rules (ufr) ufr = 
self.map_flow_rules( self.rpc_calls['update_flow_rules']) # flow rule for the end of pc1 pc1_end = self.build_ingress_egress( pc1['port_chain']['id'], pc1pp['port_pair']['ingress'], pc1pp['port_pair']['egress']) # flow rule for the start of pc2 pc2_sta = self.build_ingress_egress( pc2['port_chain']['id'], None, pc2fc['flow_classifier']['logical_source_port'] ) # flow rule for the start of pc3 pc3_sta = self.build_ingress_egress( pc3['port_chain']['id'], None, pc3fc['flow_classifier']['logical_source_port'] ) # flow rule for the end of pc2 pc2_end = self.build_ingress_egress( pc2['port_chain']['id'], pc2pp['port_pair']['ingress'], pc2pp['port_pair']['egress']) # flow rule for the end of pc3 pc3_end = self.build_ingress_egress( pc3['port_chain']['id'], pc3pp['port_pair']['ingress'], pc3pp['port_pair']['egress']) # flow rule for the start of pc4 pc4_sta = self.build_ingress_egress( pc4['port_chain']['id'], None, pc4fc['flow_classifier']['logical_source_port'] ) # flow rule for the start of pc5 pc5_sta = self.build_ingress_egress( pc5['port_chain']['id'], None, pc5fc['flow_classifier']['logical_source_port'] ) # flow rule for the end of pc4 pc4_end = self.build_ingress_egress( pc4['port_chain']['id'], pc4pp['port_pair']['ingress'], pc4pp['port_pair']['egress']) # flow rule for the end of pc5 pc5_end = self.build_ingress_egress( pc5['port_chain']['id'], pc5pp['port_pair']['ingress'], pc5pp['port_pair']['egress']) # flow rule for the start of pc6 pc6_sta = self.build_ingress_egress( pc6['port_chain']['id'], None, pc6fc['flow_classifier']['logical_source_port'] ) sta_nodes = [pc2_sta, pc3_sta, pc4_sta, pc5_sta, pc6_sta] end_nodes = [pc1_end, pc2_end, pc3_end, pc4_end, pc5_end] self.assertEqual(len(set(sta_nodes)), 5) self.assertEqual(len(set(end_nodes)), 5) nsp = {} nsi = {} old_add1 = {} for node in end_nodes: # there should only be 1 FC per flow rule self.assertEqual(len(ufr[node]['add_fcs']), 1) # save each source chain's NSP/NSI for later nsp[node] = ufr[node]['nsp'] 
nsi[node] = ufr[node]['nsi'] # save add_fcs to later compare with del_fcs old_add1[node] = ufr[node]['add_fcs'] old_add2 = {} for node in sta_nodes: # there should only be 1 FC per flow rule self.assertEqual(len(ufr[node]['add_fcs']), 1) # save add_fcs to later compare with del_fcs old_add2[node] = ufr[node]['add_fcs'] with self.service_graph(service_graph={ 'name': 'graph', 'port_chains': { pc1['port_chain']['id']: [pc2['port_chain']['id'], pc3['port_chain']['id']], pc2['port_chain']['id']: [pc4['port_chain']['id']], pc3['port_chain']['id']: [pc5['port_chain']['id']], pc4['port_chain']['id']: [pc6['port_chain']['id']], pc5['port_chain']['id']: [pc6['port_chain']['id']]}} ) as g: # clear port-chains' flow rules self.init_rpc_calls() if create: self._test_create_service_graph_complex( g, sta_nodes, end_nodes, old_add1, old_add2, nsp, nsi) else: self._test_delete_service_graph_complex( g, sta_nodes, end_nodes, old_add1, old_add2, nsp, nsi) def test_create_service_graph_complex_mpls(self): self._test_service_graph_complex(True, 'mpls') def test_create_service_graph_complex_nsh(self): self._test_service_graph_complex(True, 'nsh') def test_delete_service_graph_complex_mpls(self): self._test_service_graph_complex(False, 'mpls') def test_delete_service_graph_complex_nsh(self): self._test_service_graph_complex(False, 'nsh') def test_create_port_chain_with_tap_enabled_ppg_only(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress: self.host_endpoint_mapping = { 'test': '10.0.0.1', } with self.port_pair(port_pair={ 'ingress': ingress['port']['id'], 'egress': ingress['port']['id'] }) as pp: pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp['port_pair'] ) self.driver.create_port_pair(pp_context) with 
self.port_pair_group(port_pair_group={ 'port_pairs': [pp['port_pair']['id']], 'tap_enabled': True }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.flow_classifier(flow_classifier={ 'source_port_range_min': 100, 'source_port_range_max': 200, 'destination_port_range_min': 300, 'destination_port_range_max': 400, 'ethertype': 'IPv4', 'source_ip_prefix': '10.100.0.0/16', 'destination_ip_prefix': '10.200.0.0/16', 'l7_parameters': {}, 'protocol': 'tcp', 'logical_source_port': src_port['port']['id'] }) as fc: with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [fc['flow_classifier']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) # proxy flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) # flow2 - sf_node flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], None ) self.assertEqual( set(update_flow_rules.keys()), {flow1, flow2}) self.assertIn('skip_ingress_flow_config', update_flow_rules[flow1]) self.assertIsNone( update_flow_rules[flow2]['egress'] ) self.assertEqual( update_flow_rules[flow2]['node_type'], update_flow_rules[flow1]['node_type'] ) self.assertTrue( update_flow_rules[flow2]['tap_enabled']) def test_create_port_chain_with_default_and_tap_enabled_ppg(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress1, self.port( name='egress1', device_owner='compute', device_id='test', 
arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as egress1, self.port( name='ingress2', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress2: self.host_endpoint_mapping = { 'test': '10.0.0.1', } with self.port_pair(port_pair={ 'ingress': ingress1['port']['id'], 'egress': egress1['port']['id'] }) as default_pp, self.port_pair(port_pair={ 'ingress': ingress2['port']['id'], 'egress': ingress2['port']['id'] }) as tap_pp: default_pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, default_pp['port_pair'] ) self.driver.create_port_pair(default_pp_context) tap_pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, tap_pp['port_pair'] ) self.driver.create_port_pair(tap_pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [default_pp['port_pair']['id']] }) as default_ppg, self.port_pair_group(port_pair_group={ 'port_pairs': [tap_pp['port_pair']['id']], 'tap_enabled': True }) as tap_ppg: default_ppg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, default_ppg['port_pair_group'] ) self.driver.create_port_pair_group(default_ppg_context) tap_ppg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, tap_ppg['port_pair_group'] ) self.driver.create_port_pair_group(tap_ppg_context) with self.flow_classifier(flow_classifier={ 'source_port_range_min': 100, 'source_port_range_max': 200, 'destination_port_range_min': 300, 'destination_port_range_max': 400, 'ethertype': 'IPv4', 'source_ip_prefix': '10.100.0.0/16', 'destination_ip_prefix': '10.200.0.0/16', 'l7_parameters': {}, 'protocol': 'tcp', 'logical_source_port': src_port['port']['id'] }) as fc: with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [ default_ppg['port_pair_group']['id'], tap_ppg['port_pair_group']['id'] ], 'flow_classifiers': [fc['flow_classifier']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, 
pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() update_flow_rules = self.map_flow_rules( self.rpc_calls['update_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id'] ) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress1['port']['id'], egress1['port']['id'] ) flow3 = self.build_ingress_egress( pc['port_chain']['id'], ingress2['port']['id'], None ) self.assertEqual( set(update_flow_rules.keys()), {flow1, flow2, flow3} ) add_fcs = update_flow_rules[flow1]['add_fcs'] self.assertEqual(len(add_fcs), 1) self.assertIsNone( update_flow_rules[flow3]['egress']) # egress mac of previous node as src mac for tap # flow self.assertEqual( update_flow_rules[flow2]['mac_address'], update_flow_rules[flow3]['mac_address'] ) self.assertEqual( update_flow_rules[flow3]['node_type'], update_flow_rules[flow2]['node_type'] ) def test_delete_port_chain_of_tap_enabled_ppg(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port, self.port( name='ingress', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as ingress: self.host_endpoint_mapping = { 'test': '10.0.0.1', } with self.port_pair(port_pair={ 'ingress': ingress['port']['id'], 'egress': ingress['port']['id'] }) as pp: pp_context = sfc_ctx.PortPairContext( self.sfc_plugin, self.ctx, pp['port_pair'] ) self.driver.create_port_pair(pp_context) with self.port_pair_group(port_pair_group={ 'port_pairs': [pp['port_pair']['id']], 'tap_enabled': True }) as pg: pg_context = sfc_ctx.PortPairGroupContext( self.sfc_plugin, self.ctx, pg['port_pair_group'] ) self.driver.create_port_pair_group(pg_context) with self.flow_classifier(flow_classifier={ 'source_port_range_min': 100, 'source_port_range_max': 200, 'destination_port_range_min': 300, 'destination_port_range_max': 400, 'ethertype': 'IPv4', 
'source_ip_prefix': '10.100.0.0/16', 'destination_ip_prefix': '10.200.0.0/16', 'l7_parameters': {}, 'protocol': 'tcp', 'logical_source_port': src_port['port']['id'] }) as fc: with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [fc['flow_classifier']['id']] }) as pc: pc_context = sfc_ctx.PortChainContext( self.sfc_plugin, self.ctx, pc['port_chain'] ) self.driver.create_port_chain(pc_context) self.wait() self.driver.delete_port_chain(pc_context) delete_flow_rules = self.map_flow_rules( self.rpc_calls['delete_flow_rules']) flow1 = self.build_ingress_egress( pc['port_chain']['id'], None, src_port['port']['id']) flow2 = self.build_ingress_egress( pc['port_chain']['id'], ingress['port']['id'], None ) self.assertEqual( set(delete_flow_rules.keys()), {flow1, flow2}) networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/common/0000775000175000017500000000000013656750461025620 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/common/__init__.py0000664000175000017500000000000013656750333027715 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/common/test_ovs_ext_lib.py0000664000175000017500000000576013656750333031554 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import exceptions

from neutron.tests import base

from networking_sfc.services.sfc.common import ovs_ext_lib


class GetPortMaskTestCase(base.BaseTestCase):
    """Unit tests for ovs_ext_lib.get_port_mask.

    get_port_mask() converts an inclusive [port_min, port_max] L4 port
    range into a minimal list of OpenFlow ``value/mask`` match strings
    (hex value with a hex bitmask), as used in OVS flow rules.
    """

    # NOTE: the trivial setUp()/tearDown() overrides that only called
    # super() were removed as dead code; base.BaseTestCase already
    # provides that behavior through inheritance.

    def test_single_port(self):
        # A range of exactly one port yields a single exact match
        # (all-ones mask).
        masks = ovs_ext_lib.get_port_mask(100, 100)
        self.assertEqual(['0x64/0xffff'], masks)

    def test_invalid_min_port(self):
        # Port 0 is below the valid minimum and must be rejected.
        self.assertRaises(
            exceptions.InvalidInput,
            ovs_ext_lib.get_port_mask,
            0, 100
        )

    def test_invalid_max_port(self):
        # Port 65536 does not fit in 16 bits and must be rejected.
        self.assertRaises(
            exceptions.InvalidInput,
            ovs_ext_lib.get_port_mask,
            100, 65536
        )

    def test_invalid_port_range(self):
        # min > max describes an empty range and must be rejected.
        self.assertRaises(
            exceptions.InvalidInput,
            ovs_ext_lib.get_port_mask,
            100, 99
        )

    def test_one_port_mask(self):
        # Aligned power-of-two ranges collapse to one masked match.
        masks = ovs_ext_lib.get_port_mask(100, 101)
        self.assertEqual(['0x64/0xfffe'], masks)
        masks = ovs_ext_lib.get_port_mask(100, 103)
        self.assertEqual(['0x64/0xfffc'], masks)
        masks = ovs_ext_lib.get_port_mask(32768, 65535)
        self.assertEqual(['0x8000/0x8000'], masks)

    def test_multi_port_masks(self):
        # Unaligned ranges must be decomposed into several masked
        # matches covering the range exactly.
        masks = ovs_ext_lib.get_port_mask(101, 102)
        self.assertEqual(['0x65/0xffff', '0x66/0xffff'], masks)
        masks = ovs_ext_lib.get_port_mask(101, 104)
        self.assertEqual(
            ['0x65/0xffff', '0x66/0xfffe', '0x68/0xffff'],
            masks
        )
        masks = ovs_ext_lib.get_port_mask(1, 65535)
        self.assertEqual(
            [
                '0x1/0xffff', '0x2/0xfffe', '0x4/0xfffc',
                '0x8/0xfff8', '0x10/0xfff0', '0x20/0xffe0',
                '0x40/0xffc0', '0x80/0xff80', '0x100/0xff00',
                '0x200/0xfe00', '0x400/0xfc00', '0x800/0xf800',
                '0x1000/0xf000', '0x2000/0xe000', '0x4000/0xc000',
                '0x8000/0x8000'
            ],
            masks
        )
        masks = ovs_ext_lib.get_port_mask(32767, 65535)
        self.assertEqual(
            ['0x7fff/0xffff', '0x8000/0x8000'],
            masks
        )
0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/agent/extensions/0000775000175000017500000000000013656750461027625 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/agent/extensions/__init__.py0000664000175000017500000000000013656750333031722 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/agent/extensions/openvswitch/0000775000175000017500000000000013656750461032176 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/agent/extensions/openvswitch/__init__.pynetworking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/agent/extensions/openvswitch/__init__.p0000664000175000017500000000000013656750333034102 0ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/agent/extensions/openvswitch/test_sfc_driver.pynetworking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/agent/extensions/openvswitch/test_sfc_d0000664000175000017500000055373613656750333034257 0ustar zuulzuul00000000000000# Copyright 2015 Huawei. # Copyright 2016 Red Hat, Inc. # Copyright 2017 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from unittest import mock from oslo_config import cfg from oslo_utils import importutils from oslo_utils import uuidutils from neutron.agent.common import ovs_lib from neutron.agent.common import utils from neutron.plugins.ml2.drivers.openvswitch.agent import ( ovs_agent_extension_api as ovs_ext_api) from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native import ( ovs_bridge) from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent import ( ovs_test_base) from networking_sfc.services.sfc.agent.extensions.openvswitch import sfc_driver from networking_sfc.services.sfc.common import ovs_ext_lib class SfcAgentDriverTestCase(ovs_test_base.OVSOSKenTestBase): def _clear_local_entries(self): self.executed_cmds = [] self.node_flowrules = [] self.added_flows = [] self.installed_instructions = [] self.deleted_flows = [] self.group_mapping = {} self.deleted_groups = [] self.port_mapping = {} def setUp(self): cfg.CONF.set_override('local_ip', '10.0.0.1', 'OVS') super(SfcAgentDriverTestCase, self).setUp() self._clear_local_entries() self.execute = mock.patch.object( utils, "execute", self.mock_execute, spec=utils.execute) self.execute.start() self.use_at_least_protocol = mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.use_at_least_protocol') self.use_at_least_protocol.start() self.dp = mock.Mock() self.ofp = importutils.import_module("os_ken.ofproto.ofproto_v1_3") self.ofpp = importutils.import_module( "os_ken.ofproto.ofproto_v1_3_parser") mock.patch.object(ovs_bridge.OVSAgentBridge, "_get_dp", return_value=self._get_dp()).start() mock.patch.object(ovs_bridge.OVSAgentBridge, "install_instructions", self.mock_install_instructions).start() self.add_flow = mock.patch( "neutron.agent.common.ovs_lib.OVSBridge.add_flow", self.mock_add_flow ) self.add_flow.start() self.delete_flows = mock.patch( "neutron.agent.common.ovs_lib.OVSBridge.delete_flows", self.mock_delete_flows ) self.delete_flows.start() self.int_patch = 1 self.tun_patch = 2 self.default_port_mapping = { 
'patch-int': { 'ofport': self.int_patch }, 'patch-tun': { 'ofport': self.tun_patch } } self.get_vif_port_by_id = mock.patch.object( ovs_lib.OVSBridge, "get_vif_port_by_id", self.mock_get_vif_port_by_id ) self.get_vif_port_by_id.start() self.get_vlan_by_port = mock.patch.object( sfc_driver.SfcOVSAgentDriver, "_get_vlan_by_port", self.mock_get_vlan_by_port ) self.get_vlan_by_port.start() self.get_port_ofport = mock.patch.object( ovs_lib.OVSBridge, "get_port_ofport", self.mock_get_port_ofport ) self.get_port_ofport.start() self.set_secure_mode = mock.patch.object( ovs_lib.OVSBridge, "set_secure_mode", self.mock_set_secure_mode ) self.set_secure_mode.start() self.del_controller = mock.patch.object( ovs_lib.OVSBridge, "del_controller", self.mock_del_controller ) self.del_controller.start() self.get_bridges = mock.patch.object( ovs_lib.BaseOVS, "get_bridges", self.mock_get_bridges ) self.get_bridges.start() self.get_vif_ports = mock.patch.object( ovs_lib.OVSBridge, "get_vif_ports", self.mock_get_vif_ports ) self.get_vif_ports.start() self.get_ports_attributes = mock.patch.object( ovs_lib.OVSBridge, "get_ports_attributes", self.mock_get_ports_attributes ) self.get_ports_attributes.start() self.delete_port = mock.patch.object( ovs_lib.OVSBridge, "delete_port", self.mock_delete_port ) self.delete_port.start() self.create = mock.patch.object( ovs_lib.OVSBridge, "create", self.mock_create ) self.create.start() self.add_port = mock.patch.object( ovs_lib.OVSBridge, "add_port", self.mock_add_port ) self.add_port.start() self.bridge_exists = mock.patch.object( ovs_lib.BaseOVS, "bridge_exists", self.mock_bridge_exists ) self.bridge_exists.start() self.port_exists = mock.patch.object( ovs_lib.BaseOVS, "port_exists", self.mock_port_exists ) self.port_exists.start() self.capabilities = mock.patch.object( ovs_lib.BaseOVS, "capabilities", self.mock_capabilities ) self.capabilities.start() self.apply_flows = mock.patch.object( ovs_lib.DeferredOVSBridge, "apply_flows", 
self.mock_apply_flows ) self.apply_flows.start() self.dump_group_for_id = mock.patch.object( ovs_ext_lib.SfcOVSBridgeExt, "dump_group_for_id", self.mock_dump_group_for_id ) self.dump_group_for_id.start() self.add_group = mock.patch.object( ovs_ext_lib.SfcOVSBridgeExt, "add_group", self.mock_add_group ) self.add_group.start() self.mod_group = mock.patch.object( ovs_ext_lib.SfcOVSBridgeExt, "mod_group", self.mock_mod_group ) self.mod_group.start() self.delete_group = mock.patch.object( ovs_ext_lib.SfcOVSBridgeExt, "delete_group", self.mock_delete_group ) self.delete_group.start() self.get_bridge_ports = mock.patch.object( ovs_ext_lib.SfcOVSBridgeExt, "get_bridge_ports", self.mock_get_bridge_ports ) self.get_bridge_ports.start() self.sfc_driver = sfc_driver.SfcOVSAgentDriver() os_ken_app = mock.Mock() self.agent_api = ovs_ext_api.OVSAgentExtensionAPI( ovs_bridge.OVSAgentBridge('br-int', os_ken_app=os_ken_app), ovs_bridge.OVSAgentBridge('br-tun', os_ken_app=os_ken_app)) self.sfc_driver.consume_api(self.agent_api) self.sfc_driver.initialize() self._clear_local_entries() def _get_dp(self): return self.dp, self.ofp, self.ofpp def mock_delete_group(self, group_id): if group_id == 'all': self.group_mapping = {} else: if group_id in self.group_mapping: del self.group_mapping[group_id] self.deleted_groups.append(group_id) def mock_mod_group(self, group_id, **kwargs): kwargs['group_id'] = group_id self.group_mapping[group_id] = kwargs def mock_add_group(self, group_id, **kwargs): kwargs['group_id'] = group_id self.group_mapping[group_id] = kwargs def mock_dump_group_for_id(self, group_id): if group_id in self.group_mapping: group_list = [] group = self.group_mapping[group_id] for group_key, group_value in group.items(): group_list.append('%s=%s' % (group_key, group_value)) return ' '.join(group_list) else: return '' def mock_set_secure_mode(self): pass def mock_del_controller(self): pass def mock_get_bridges(self): return ['br-int', 'br-tun'] def mock_get_port_ofport(self, 
port_name): for port_id, port_values in self.port_mapping.items(): if port_values['port_name'] == port_name: return port_values['ofport'] if port_name in self.default_port_mapping: return self.default_port_mapping[port_name]['ofport'] return ovs_lib.INVALID_OFPORT def mock_add_port(self, port_name, *interface_attr_tuples): return self.mock_get_port_ofport(port_name) def mock_bridge_exists(self, bridge_name): return True def mock_port_exists(self, port_name): return True def mock_capabilities(self): return {'datapath_types': [], 'iface_types': []} def mock_apply_flows(self): pass def mock_get_vif_port_by_id(self, port_id): if port_id in self.port_mapping: port_values = self.port_mapping[port_id] return ovs_lib.VifPort( port_values['port_name'], port_values['ofport'], port_id, port_values['vif_mac'], self.sfc_driver.br_int ) def mock_get_vlan_by_port(self, port_id): return 0 def mock_get_vif_ports(self, ofport_filter): vif_ports = [] for port_id, port_values in self.port_mapping.items(): vif_ports.append( ovs_lib.VifPort( port_values['port_name'], port_values['ofport'], port_id, port_values['vif_mac'], self.sfc_driver.br_int ) ) return vif_ports def mock_get_ports_attributes( self, table, columns=None, ports=None, check_error=True, log_errors=True, if_exists=False ): port_infos = [] for port_id, port_values in self.port_mapping.items(): port_info = {} if columns: if 'name' in columns: port_info['name'] = port_values['port_name'] if 'ofport' in columns: port_info['ofport'] = port_values['ofport'] if 'extenal_ids' in columns: port_info['extenal_ids'] = { 'iface-id': port_id, 'attached-mac': port_values['vif_mac'] } if 'other_config' in columns: port_info['other_config'] = {} if 'tag' in columns: port_info['tag'] = [] else: port_info = { 'name': port_values['port_name'], 'ofport': port_values['ofport'], 'extenal_ids': { 'iface-id': port_id, 'attached-mac': port_values['vif_mac'] }, 'other_config': {}, 'tag': [] } if ports: if port_values['port_name'] in ports: 
port_infos.append(port_info) else: port_infos.append(port_info) return port_infos def mock_delete_port(self, port_name): found_port_id = None for port_id, port_values in self.port_mapping.items(): if port_values['port_name'] == port_name: found_port_id = port_id if found_port_id: del self.port_mapping[found_port_id] def mock_create(self, secure_mode=False): pass def mock_install_instructions(self, *args, **kwargs): if kwargs not in self.installed_instructions: self.installed_instructions.append(kwargs) def mock_add_flow(self, *args, **kwargs): if kwargs not in self.added_flows: self.added_flows.append(kwargs) def mock_delete_flows(self, *args, **kwargs): if kwargs not in self.deleted_flows: self.deleted_flows.append(kwargs) def mock_get_flowrules_by_host_portid(self, context, port_id): return [ flowrule for flowrule in self.node_flowrules if ( flowrule['ingress'] == port_id or flowrule['egress'] == port_id ) ] def mock_get_all_src_node_flowrules(self, context): return [ flowrule for flowrule in self.node_flowrules if ( flowrule['node_type'] == 'src_node' and flowrule['egress'] is None ) ] def mock_execute(self, cmd, *args, **kwargs): self.executed_cmds.append(' '.join(cmd)) def mock_get_bridge_ports(self): return [77, 88] def tearDown(self): self.execute.stop() self.use_at_least_protocol.stop() self.add_flow.stop() self.delete_flows.stop() self.get_vif_port_by_id.stop() self.get_vlan_by_port.stop() self.get_port_ofport.stop() self.set_secure_mode.stop() self.del_controller.stop() self.get_bridges.stop() self.get_vif_ports.stop() self.get_ports_attributes.stop() self.delete_port.stop() self.create.stop() self.add_port.stop() self.bridge_exists.stop() self.port_exists.stop() self.capabilities.stop() self.apply_flows.stop() self.dump_group_for_id.stop() self.add_group.stop() self.mod_group.stop() self.delete_group.stop() self._clear_local_entries() super(SfcAgentDriverTestCase, self).tearDown() def _assert_update_flow_rules_sf_node_many_hops_no_proxy_mpls(self): 
self.assertEqual(
            # Expected MPLS flows for an sf_node with two next hops:
            # two table-5 entries (one per next-hop MAC) re-tagging to
            # VLAN 0, a table-0 match on label 65791 steering to group 1,
            # and the table-10 ingress flow for label 65792.
            [{
                'actions': (
                    'mod_vlan_vid:0,,resubmit(,10)'),
                'dl_dst': '12:34:56:78:cf:23',
                'eth_type': 34887,
                'priority': 0,
                'table': 5
            }, {
                'actions': (
                    'mod_vlan_vid:0,,resubmit(,10)'),
                'dl_dst': '12:34:56:78:ab:cd',
                'eth_type': 34887,
                'priority': 0,
                'table': 5
            }, {
                'actions': 'group:1',
                'eth_type': 34887,
                'in_port': 42,
                'mpls_label': 65791,
                'priority': 30,
                'table': 0
            }, {
                'actions': 'strip_vlan, output:6',
                'dl_dst': '00:01:02:03:05:07',
                'eth_type': 34887,
                'dl_vlan': 0,
                'mpls_label': 65792,
                'priority': 1,
                'table': 10
            }],
            self.added_flows
        )
        # Group 1 load-balances ('select') across both next-hop MACs.
        self.assertEqual(
            {
                1: {
                    'buckets': (
                        'bucket=weight=1, '
                        'mod_dl_dst:12:34:56:78:cf:23, '
                        'resubmit(,5),'
                        'bucket=weight=1, '
                        'mod_dl_dst:12:34:56:78:ab:cd, '
                        'resubmit(,5)'
                    ),
                    'group_id': 1,
                    'type': 'select'
                }
            },
            self.group_mapping
        )

    def _assert_update_flow_rules_sf_node_many_hops_no_proxy_nsh(self):
        # NSH variant of the many-hops assertion above: eth_type 35151
        # with nsh_mdtype/nsh_spi/nsh_si matches instead of MPLS labels.
        self.assertEqual(
            [{
                'actions': (
                    'mod_vlan_vid:0,,resubmit(,10)'),
                'dl_dst': '12:34:56:78:cf:23',
                'eth_type': 35151,
                'priority': 0,
                'table': 5
            }, {
                'actions': (
                    'mod_vlan_vid:0,,resubmit(,10)'),
                'dl_dst': '12:34:56:78:ab:cd',
                'eth_type': 35151,
                'priority': 0,
                'table': 5
            }, {
                'actions': 'group:1',
                'eth_type': 35151,
                'nsh_mdtype': 1,
                'nsh_spi': 256,
                'nsh_si': 255,
                'in_port': 42,
                'priority': 30,
                'table': 0
            }, {
                'actions': 'strip_vlan, output:6',
                'dl_dst': '00:01:02:03:05:07',
                'eth_type': 35151,
                'nsh_mdtype': 1,
                'nsh_spi': 256,
                'nsh_si': 256,
                'dl_vlan': 0,
                'priority': 1,
                'table': 10
            }],
            self.added_flows
        )
        self.assertEqual(
            {
                1: {
                    'buckets': (
                        'bucket=weight=1, '
                        'mod_dl_dst:12:34:56:78:cf:23, '
                        'resubmit(,5),'
                        'bucket=weight=1, '
                        'mod_dl_dst:12:34:56:78:ab:cd, '
                        'resubmit(,5)'
                    ),
                    'group_id': 1,
                    'type': 'select'
                }
            },
            self.group_mapping
        )

    def _prepare_update_flow_rules_sf_node_empty_next_hops(
            self, pc_corr, pp_corr):
        # Drive update_flow_rules for an sf_node with no next hops,
        # using the given chain (pc_corr) / path (pp_corr) correlation
        # types. (Flow-rule dict continues on the next chunk line.)
        self.port_mapping = {
            'dd7374b9-a6ac-4a66-a4a6-7d3dee2a1579': {
                'port_name': 'src_port',
                'ofport': 6,
                'vif_mac': '00:01:02:03:05:07',
            },
            '2f1d2140-42ce-4979-9542-7ef25796e536': {
                'port_name': 'dst_port',
'ofport': 42,
                'vif_mac': '00:01:02:03:06:08',
            }
        }
        status = []
        self.sfc_driver.update_flow_rules(
            {
                'nsi': 254,
                'ingress': u'dd7374b9-a6ac-4a66-a4a6-7d3dee2a1579',
                'next_hops': None,
                'del_fcs': [],
                'group_refcnt': 1,
                'node_type': 'sf_node',
                'egress': u'2f1d2140-42ce-4979-9542-7ef25796e536',
                'next_group_id': None,
                'nsp': 256,
                'add_fcs': [],
                'id': uuidutils.generate_uuid(),
                'fwd_path': True,
                'pc_corr': pc_corr,
                'pp_corr': pp_corr
            },
            status
        )

    def _prepare_update_flow_rules_src_node_empty_next_hops_a_d(
            self, pc_corr, pp_corr):
        # src_node with no next hops where the same classifier appears
        # in both del_fcs and add_fcs (delete + re-add in one update).
        self.port_mapping = {
            '9bedd01e-c216-4dfd-b48e-fbd5c8212ba4': {
                'port_name': 'dst_port',
                'ofport': 42,
                'vif_mac': '00:01:02:03:06:08',
            }
        }
        status = []
        self.sfc_driver.update_flow_rules(
            {
                'nsi': 255,
                'ingress': None,
                'next_hops': None,
                'del_fcs': [{
                    'source_port_range_min': 100,
                    'destination_ip_prefix': u'10.200.0.0/16',
                    'protocol': u'tcp',
                    'l7_parameters': {},
                    'source_port_range_max': 100,
                    'source_ip_prefix': u'10.100.0.0/16',
                    'destination_port_range_min': 100,
                    'ethertype': u'IPv4',
                    'destination_port_range_max': 100,
                }],
                'group_refcnt': 1,
                'node_type': 'src_node',
                'egress': u'9bedd01e-c216-4dfd-b48e-fbd5c8212ba4',
                'next_group_id': None,
                'nsp': 256,
                'add_fcs': [{
                    'source_port_range_min': 100,
                    'destination_ip_prefix': u'10.200.0.0/16',
                    'protocol': u'tcp',
                    'l7_parameters': {},
                    'source_port_range_max': 100,
                    'source_ip_prefix': u'10.100.0.0/16',
                    'destination_port_range_min': 100,
                    'ethertype': u'IPv4',
                    'destination_port_range_max': 100,
                }],
                'id': uuidutils.generate_uuid(),
                'fwd_path': True,
                'pc_corr': pc_corr,
                'pp_corr': pp_corr
            },
            status
        )
        # No shell commands should have been executed by the driver.
        self.assertEqual(
            [],
            self.executed_cmds
        )

    def _test_update_flow_rules_src_empty_next_hops_a_d(self, pc_corr):
        # The add/delete pair should leave exactly one 'normal' flow in
        # added_flows matching the classifier's 5-tuple.
        self._prepare_update_flow_rules_src_node_empty_next_hops_a_d(
            pc_corr, None)
        self.assertEqual(
            [{
                'actions': 'normal',
                'eth_type': 2048,
                'in_port': 42,
                'nw_dst': u'10.200.0.0/16',
                'nw_proto': 6,
                'nw_src': u'10.100.0.0/16',
                'priority': 30,
                'table': 0,
                'tp_dst': '0x64/0xffff',
                'tp_src': '0x64/0xffff'
            }],
self.added_flows
        )
        # The removed classifier must be deleted with a strict match.
        self.assertEqual(
            [{
                'eth_type': 2048,
                'in_port': 42,
                'nw_dst': u'10.200.0.0/16',
                'nw_proto': 6,
                'nw_src': u'10.100.0.0/16',
                'priority': 30,
                'table': 0,
                'tp_dst': '0x64/0xffff',
                'tp_src': '0x64/0xffff',
                'strict': True,
            }],
            self.deleted_flows
        )
        # No OpenFlow groups are expected for a src_node with no hops.
        self.assertEqual(
            {},
            self.group_mapping
        )

    def _prepare_update_flow_rules_sf_node_empty_next_hops_a_d(
            self, pc_corr, pp_corr):
        # sf_node with no next hops; same classifier in del_fcs and
        # add_fcs. Both fixture ports deliberately share the same
        # name/ofport/MAC.
        self.port_mapping = {
            '9bedd01e-c216-4dfd-b48e-fbd5c8212ba4': {
                'port_name': 'dst_port',
                'ofport': 42,
                'vif_mac': '00:01:02:03:06:08',
            },
            '2f1d2140-42ce-4979-9542-7ef25796e536': {
                'port_name': 'dst_port',
                'ofport': 42,
                'vif_mac': '00:01:02:03:06:08',
            }
        }
        status = []
        self.sfc_driver.update_flow_rules(
            {
                'nsi': 255,
                'ingress': '2f1d2140-42ce-4979-9542-7ef25796e536',
                'next_hops': None,
                'del_fcs': [{
                    'source_port_range_min': 100,
                    'destination_ip_prefix': u'10.200.0.0/16',
                    'protocol': u'tcp',
                    'l7_parameters': {},
                    'source_port_range_max': 100,
                    'source_ip_prefix': u'10.100.0.0/16',
                    'destination_port_range_min': 100,
                    'ethertype': u'IPv4',
                    'destination_port_range_max': 100,
                }],
                'group_refcnt': 1,
                'node_type': 'sf_node',
                'egress': u'9bedd01e-c216-4dfd-b48e-fbd5c8212ba4',
                'next_group_id': None,
                'nsp': 256,
                'add_fcs': [{
                    'source_port_range_min': 100,
                    'destination_ip_prefix': u'10.200.0.0/16',
                    'protocol': u'tcp',
                    'l7_parameters': {},
                    'source_port_range_max': 100,
                    'source_ip_prefix': u'10.100.0.0/16',
                    'destination_port_range_min': 100,
                    'ethertype': u'IPv4',
                    'destination_port_range_max': 100,
                }],
                'id': uuidutils.generate_uuid(),
                'fwd_path': True,
                'pc_corr': pc_corr,
                'pp_corr': pp_corr
            },
            status
        )
        self.assertEqual(
            [],
            self.executed_cmds
        )

    def _prepare_update_flow_rules_src_node_next_hops_add_fcs(
            self, pc_corr, pp_corr, pp_corr_nh):
        # src_node with one remote next hop (local_endpoint 10.0.0.2)
        # and one classifier being added; correlation types are
        # parameterized for the chain, this hop, and the next hop.
        # (The update_flow_rules call continues on the next chunk line.)
        self.port_mapping = {
            '8768d2b3-746d-4868-ae0e-e81861c2b4e6': {
                'port_name': 'port1',
                'ofport': 6,
                'vif_mac': '00:01:02:03:05:07',
            },
            '29e38fb2-a643-43b1-baa8-a86596461cd5': {
                'port_name': 'port2',
                'ofport': 42,
                'vif_mac': '00:01:02:03:06:08',
            }
        }
        status = []
self.sfc_driver.update_flow_rules( { 'nsi': 255, 'ingress': None, 'next_hops': [{ 'local_endpoint': '10.0.0.2', 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pp_corr_nh }], 'del_fcs': [], 'group_refcnt': 1, 'node_type': 'src_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': 1, 'nsp': 256, 'add_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': pp_corr }, status ) self.assertEqual( [], self.executed_cmds ) def _prepare_update_flow_rules_src_node_next_hops_same_host_a( self, pc_corr, pp_corr_nh): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 'port_name': 'port2', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.update_flow_rules( { 'nsi': 255, 'ingress': None, 'next_hops': [{ 'local_endpoint': '10.0.0.1', 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pp_corr_nh }], 'del_fcs': [], 'group_refcnt': 1, 'node_type': 'src_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': 1, 'nsp': 256, 'add_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 
100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': None }, status ) self.assertEqual( [], self.executed_cmds ) def _prepare_update_flow_rules_sf_node_next_hops_add_fcs( self, pc_corr, pp_corr, pp_corr_nh): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 'port_name': 'port2', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', }, '6331a00d-779b-462b-b0e4-6a65aa3164ef': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, } status = [] self.sfc_driver.update_flow_rules( { 'nsi': 255, 'ingress': '6331a00d-779b-462b-b0e4-6a65aa3164ef', 'next_hops': [{ 'local_endpoint': '10.0.0.2', 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pp_corr_nh }], 'del_fcs': [], 'group_refcnt': 1, 'node_type': 'sf_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': 1, 'nsp': 256, 'add_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': pp_corr }, status ) self.assertEqual( [], self.executed_cmds ) def test_update_flowrules_srcnode_no_nexthops_add_del_fcs_symmetric(self): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'src_port', 'ofport': 32, 'vif_mac': '00:01:02:03:05:07', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 
'port_name': 'dst_port', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } for flow_rule in self.node_flowrules: if flow_rule['fwd_path']: status = [] self.agent.update_flow_rules( { 'nsi': 255, 'ingress': None, 'next_hops': None, 'del_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': u'10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': u'IPv4', 'destination_port_range_max': 100, }], 'group_refcnt': 1, 'node_type': 'src_node', 'egress': u'29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': None, 'nsp': 256, 'add_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': u'10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': u'IPv4', 'destination_port_range_max': 100, }], 'id': uuidutils.generate_uuid(), 'fwd_path': True }, status ) self.assertEqual( [], self.executed_cmds ) self.assertEqual( [{ 'actions': 'normal', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': u'10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( [{ 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': u'10.100.0.0/16', 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.deleted_flows ) self.assertEqual( {}, self.group_mapping ) else: status = [] self.agent.update_flow_rules( { 'nsi': 255, 'ingress': None, 'next_hops': None, 'del_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.100.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': u'10.200.0.0/16', 'destination_port_range_min': 100, 'ethertype': u'IPv4', 'destination_port_range_max': 100, }], 'group_refcnt': 1, 'node_type': 'src_node', 'egress': 
u'8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'next_group_id': None, 'nsp': 256, 'add_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.100.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': u'10.200.0.0/16', 'destination_port_range_min': 100, 'ethertype': u'IPv4', 'destination_port_range_max': 100, }], 'id': uuidutils.generate_uuid(), 'fwd_path': False }, status ) self.assertEqual( [], self.executed_cmds ) self.assertEqual( [{ 'actions': 'normal', 'eth_type': 2048, 'in_port': 32, 'nw_dst': u'10.100.0.0/16', 'nw_proto': 6, 'nw_src': u'10.200.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( [{ 'eth_type': 2048, 'in_port': 32, 'nw_dst': u'10.100.0.0/16', 'nw_proto': 6, 'nw_src': u'10.200.0.0/16', 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True, }], self.deleted_flows ) self.assertEqual( {}, self.group_mapping ) def _test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy(self, pp_corr_nh): self._prepare_update_flow_rules_sf_node_next_hops_add_fcs('mpls', 'mpls', pp_corr_nh) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': 'group:1', 'eth_type': 34887, 'in_port': 42, 'mpls_label': 65791, 'priority': 30, 'table': 0 }, { 'actions': 'strip_vlan, output:6', 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 65792, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy(self): self._test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy(None) def test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy_nh(self): 
self._test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy('mpls') def _prepare_update_flow_rules_sf_node_next_hops_same_host_add_fcs( self, pc_corr, pp_corr, pp_corr_nh): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 'port_name': 'port2', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', }, '6331a00d-779b-462b-b0e4-6a65aa3164ef': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, } status = [] self.sfc_driver.update_flow_rules( { 'nsi': 255, 'ingress': '6331a00d-779b-462b-b0e4-6a65aa3164ef', 'next_hops': [{ 'local_endpoint': '10.0.0.1', 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pp_corr_nh }], 'del_fcs': [], 'group_refcnt': 1, 'node_type': 'sf_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': 1, 'nsp': 256, 'add_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'pc_corr': pc_corr, 'pp_corr': pp_corr, 'id': uuidutils.generate_uuid(), 'fwd_path': True }, status ) self.assertEqual( [], self.executed_cmds ) # to go from chain src_node to graph src_node, or vice-versa (on_add=True) def _prepare_update_flow_rules_src_node_graph_dependent_a( self, pc_corr, host, on_add): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 'port_name': 'port2', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] 
self.sfc_driver.update_flow_rules( { 'nsi': 252, 'ingress': None, 'next_hops': [{ 'local_endpoint': host, 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pc_corr }], 'del_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100 }], 'group_refcnt': 1, 'node_type': 'src_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'branch_info': { 'on_add': on_add, 'matches': [(240, 200)] }, 'next_group_id': 1, 'nsp': 250, 'add_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100 }], 'pc_corr': pc_corr, 'pp_corr': None, 'id': uuidutils.generate_uuid(), 'fwd_path': True }, status ) self.assertEqual( [], self.executed_cmds ) # to go from chain's last sf_node to graph's last sf_node, # or vice-versa (branch_point=False or missing) def _prepare_update_flow_rules_lastsf_node_graph_dependency_same_h_a( self, pc_corr, branch_point): self.port_mapping = { '9bedd01e-c216-4dfd-b48e-fbd5c8212ba4': { 'port_name': 'dst_port', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', }, '2f1d2140-42ce-4979-9542-7ef25796e536': { 'port_name': 'dst_port', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.update_flow_rules( { 'nsi': 200, 'ingress': '2f1d2140-42ce-4979-9542-7ef25796e536', 'next_hops': None, 'del_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 
'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': u'10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': u'IPv4', 'destination_port_range_max': 100, }], 'group_refcnt': 1, 'node_type': 'sf_node', 'egress': u'9bedd01e-c216-4dfd-b48e-fbd5c8212ba4', 'next_group_id': None, 'nsp': 240, 'add_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': u'10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': u'IPv4', 'destination_port_range_max': 100, }], 'pc_corr': pc_corr, 'pp_corr': pc_corr, 'branch_point': branch_point, 'id': uuidutils.generate_uuid(), 'fwd_path': True }, status ) self.assertEqual( [], self.executed_cmds ) # tests flow rules for "joining" branches (many entries in branch_info) def _prepare_update_flow_rules_src_node_graph_dependent_join( self, pc_corr, host, on_add): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 'port_name': 'port2', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.update_flow_rules( { 'nsi': 252, 'ingress': None, 'next_hops': [{ 'local_endpoint': host, 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pc_corr }], 'del_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100 }], 'group_refcnt': 1, 'node_type': 'src_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'branch_info': { 'on_add': 
on_add, 'matches': [(240, 200), (250, 100)] }, 'next_group_id': 1, 'nsp': 250, 'add_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'pc_corr': pc_corr, 'pp_corr': None, 'id': uuidutils.generate_uuid(), 'fwd_path': True }, status ) self.assertEqual( [], self.executed_cmds ) def _prepare_update_flow_rules_sf_node_many_hops_all_encap( self, pc_corr, pp_corr, pp_corr_nh): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '1234d2b3-746d-4868-ae0e-e81861c25678': { 'port_name': 'port3', 'ofport': 9, 'vif_mac': '00:01:02:0a:0b:0c', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 'port_name': 'port2', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', }, '6331a00d-779b-462b-b0e4-6a65aa3164ef': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, } status = [] self.sfc_driver.update_flow_rules( { 'nsi': 255, 'ingress': '6331a00d-779b-462b-b0e4-6a65aa3164ef', 'next_hops': [{ 'local_endpoint': '10.0.0.1', 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pp_corr_nh }, { 'local_endpoint': '10.0.0.1', 'ingress': '1234d2b3-746d-4868-ae0e-e81861c25678', 'weight': 1, 'net_uuid': '1234d2b3-746d-4868-ae0e-e81861c25678', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'in_mac_address': '12:34:56:78:ab:cd', 'pp_corr': pp_corr_nh }], 'del_fcs': [], 'group_refcnt': 1, 'node_type': 'sf_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': 1, 'nsp': 256, 'add_fcs': [{ 
'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'pc_corr': pc_corr, 'pp_corr': pp_corr, 'id': uuidutils.generate_uuid(), 'fwd_path': True }, status ) self.assertEqual( [], self.executed_cmds ) def _prepare_delete_flow_rules_sf_node_empty_del_fcs( self, pc_corr, pp_corr): self.port_mapping = { 'dd7374b9-a6ac-4a66-a4a6-7d3dee2a1579': { 'port_name': 'src_port', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '2f1d2140-42ce-4979-9542-7ef25796e536': { 'port_name': 'dst_port', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.delete_flow_rule( { 'nsi': 254, 'ingress': u'dd7374b9-a6ac-4a66-a4a6-7d3dee2a1579', 'next_hops': None, 'del_fcs': [], 'group_refcnt': 1, 'node_type': 'sf_node', 'egress': u'2f1d2140-42ce-4979-9542-7ef25796e536', 'next_group_id': None, 'nsp': 256, 'add_fcs': [], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': pp_corr }, status ) self.assertEqual( [], self.executed_cmds ) def _prepare_delete_flow_rules_src_node_empty_del_fcs( self, pc_corr, pp_corr): self.port_mapping = { 'dd7374b9-a6ac-4a66-a4a6-7d3dee2a1579': { 'port_name': 'src_port', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '2f1d2140-42ce-4979-9542-7ef25796e536': { 'port_name': 'dst_port', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.delete_flow_rule( { 'nsi': 254, 'ingress': None, 'next_hops': None, 'del_fcs': [], 'group_refcnt': 1, 'node_type': 'sf_node', 'egress': u'2f1d2140-42ce-4979-9542-7ef25796e536', 'next_group_id': None, 'nsp': 256, 'add_fcs': [], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': pp_corr }, status ) self.assertEqual( [], self.executed_cmds ) def _prepare_delete_flow_rules_src_node_del_fcs( self, pc_corr, pp_corr): 
self.port_mapping = { 'dd7374b9-a6ac-4a66-a4a6-7d3dee2a1579': { 'port_name': 'src_port', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '2f1d2140-42ce-4979-9542-7ef25796e536': { 'port_name': 'dst_port', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.delete_flow_rule( { 'nsi': 254, 'ingress': None, 'next_hops': None, 'del_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'group_refcnt': 1, 'node_type': 'src_node', 'egress': u'2f1d2140-42ce-4979-9542-7ef25796e536', 'next_group_id': None, 'nsp': 256, 'add_fcs': [], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': pp_corr }, status ) self.assertEqual( [], self.executed_cmds ) def _prepare_delete_flow_rules_sf_node_del_fcs( self, pc_corr, pp_corr): self.port_mapping = { 'dd7374b9-a6ac-4a66-a4a6-7d3dee2a1579': { 'port_name': 'src_port', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '2f1d2140-42ce-4979-9542-7ef25796e536': { 'port_name': 'dst_port', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.delete_flow_rule( { 'nsi': 254, 'ingress': u'dd7374b9-a6ac-4a66-a4a6-7d3dee2a1579', 'next_hops': None, 'del_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'group_refcnt': 1, 'node_type': 'sf_node', 'egress': u'2f1d2140-42ce-4979-9542-7ef25796e536', 'next_group_id': None, 'nsp': 256, 'add_fcs': [], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': pp_corr }, status ) self.assertEqual( [], self.executed_cmds ) def 
_prepare_delete_flow_rules_src_node_next_hops_del_fcs( self, pc_corr, pp_corr, pp_corr_nh): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 'port_name': 'port2', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.delete_flow_rule( { 'nsi': 255, 'ingress': None, 'next_hops': [{ 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'local_endpoint': '10.0.0.2', 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pp_corr_nh }], 'add_fcs': [], 'group_refcnt': 1, 'node_type': 'src_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': 1, 'nsp': 256, 'del_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': pp_corr }, status ) self.assertEqual( [], self.executed_cmds ) def _prepare_delete_flow_rules_sf_node_next_hops_del_fcs( self, pc_corr, pp_corr, pp_corr_nh): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 'port_name': 'port2', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.delete_flow_rule( { 'nsi': 255, 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'next_hops': [{ 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'local_endpoint': '10.0.0.2', 'ingress': 
'8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pp_corr_nh }], 'add_fcs': [], 'group_refcnt': 1, 'node_type': 'sf_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': 1, 'nsp': 256, 'del_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': pp_corr }, status ) self.assertEqual( [], self.executed_cmds ) def _test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy_mpls( self, pp_corr_nh): self._prepare_update_flow_rules_sf_node_next_hops_add_fcs('mpls', 'mpls', pp_corr_nh) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': 'group:1', 'eth_type': 34887, 'in_port': 42, 'mpls_label': 65791, 'priority': 30, 'table': 0 }, { 'actions': 'strip_vlan, output:6', 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 65792, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def _test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy_nsh( self, pp_corr_nh): self._prepare_update_flow_rules_sf_node_next_hops_add_fcs('nsh', 'nsh', pp_corr_nh) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 35151, 'priority': 0, 'table': 5 }, { 'actions': 'group:1', 'eth_type': 35151, 'in_port': 42, 'nsh_mdtype': 1, 'nsh_si': 255, 'nsh_spi': 256, 'priority': 30, 'table': 0 }, { 'actions': 'strip_vlan, output:6', 'dl_dst': '00:01:02:03:05:07', 'eth_type': 35151, 
# NOTE(review): flattened chunk of an OVS SFC agent-driver test class.
# These helpers drive self.sfc_driver via _prepare_* fixtures and assert the
# exact OpenFlow entries recorded in self.added_flows / self.deleted_flows
# and the OVS group buckets in self.group_mapping.  eth_type 34887 is MPLS
# (0x8847) and eth_type 35151 is NSH (0x894f); the expected-flow dict
# literals are the test oracle and are kept byte-identical below.
'dl_vlan': 0, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 256, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def _test_update_flow_rules_sf_node_next_hops_same_h_a_no_proxy_mpls( self, pp_corr_nh): self._prepare_update_flow_rules_sf_node_next_hops_same_host_add_fcs( 'mpls', 'mpls', pp_corr_nh) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': 'group:1', 'eth_type': 34887, 'in_port': 42, 'mpls_label': 65791, 'priority': 30, 'table': 0 }, { 'actions': 'strip_vlan, output:6', 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 65792, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def _test_update_flow_rules_sf_node_next_hops_same_h_a_no_proxy_nsh( self, pp_corr_nh): self._prepare_update_flow_rules_sf_node_next_hops_same_host_add_fcs( 'nsh', 'nsh', pp_corr_nh) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 35151, 'priority': 0, 'table': 5 }, { 'actions': 'group:1', 'eth_type': 35151, 'in_port': 42, 'nsh_mdtype': 1, 'nsh_si': 255, 'nsh_spi': 256, 'priority': 30, 'table': 0 }, { 'actions': 'strip_vlan, output:6', 'dl_dst': '00:01:02:03:05:07', 'eth_type': 35151, 'dl_vlan': 0, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 256, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def _test_delete_flow_rules_src_node_empty_del_fcs( self, pc_corr, pp_corr): 
self._prepare_delete_flow_rules_src_node_empty_del_fcs(pc_corr, pp_corr) self.assertEqual( [], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def _test_delete_flow_rules_src_node_del_fcs( self, pc_corr, pp_corr): self._prepare_delete_flow_rules_src_node_del_fcs(pc_corr, pp_corr) self.assertEqual( [{ 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True, }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def _test_delete_flow_rules_src_node_next_hops_del_fcs( self, pc_corr, pp_corr, pp_corr_nh): self._prepare_delete_flow_rules_src_node_next_hops_del_fcs(pc_corr, pp_corr, pp_corr_nh) self.assertEqual( [{ 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True, }, { 'dl_dst': '12:34:56:78:cf:23', 'table': 5 }], self.deleted_flows ) self.assertEqual( [1], self.deleted_groups ) def _test_delete_flow_rules_sf_node_next_hops_del_fcs_no_proxy_mpls( self, pp_corr_nh): self._prepare_delete_flow_rules_sf_node_next_hops_del_fcs('mpls', 'mpls', pp_corr_nh) self.assertEqual( [{ 'eth_type': 34887, 'mpls_label': 65791, 'in_port': 42, 'priority': 30, 'table': 0, 'strict': True, }, { 'dl_dst': '12:34:56:78:cf:23', 'table': 5 }, { 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'mpls_label': 65792, 'table': 10 }], self.deleted_flows ) self.assertEqual( [1], self.deleted_groups ) def _test_delete_flow_rules_sf_node_next_hops_del_fcs_no_proxy_nsh( self, pp_corr_nh): self._prepare_delete_flow_rules_sf_node_next_hops_del_fcs( 'nsh', 'nsh', pp_corr_nh) self.assertEqual( [{ 'eth_type': 35151, 'in_port': 42, 'nsh_mdtype': 1, 'nsh_si': 255, 'nsh_spi': 256, 'priority': 30, 'table': 0, 'strict': True }, { 'dl_dst': '12:34:56:78:cf:23', 'table': 5 }, { 'dl_dst': 
'00:01:02:03:05:07', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_si': 256, 'nsh_spi': 256, 'table': 10 }], self.deleted_flows ) self.assertEqual( [1], self.deleted_groups ) def _test_delete_flow_rules_sf_node_next_hops_del_fcs_mpls( self, pp_corr_nh): self._prepare_delete_flow_rules_sf_node_next_hops_del_fcs( 'mpls', None, pp_corr_nh) self.assertEqual( [{ 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True, }, { 'dl_dst': '12:34:56:78:cf:23', 'table': 5 }, { 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'mpls_label': 65792, 'table': 10 }], self.deleted_flows ) self.assertEqual( [1], self.deleted_groups ) def _test_delete_flow_rules_sf_node_next_hops_del_fcs_nsh( self, pp_corr_nh): self._prepare_delete_flow_rules_sf_node_next_hops_del_fcs('nsh', None, pp_corr_nh) self.assertEqual( [{ 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }, { 'dl_dst': '12:34:56:78:cf:23', 'table': 5 }, { 'dl_dst': '00:01:02:03:05:07', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_si': 256, 'nsh_spi': 256, 'table': 10 }], self.deleted_flows ) self.assertEqual( [1], self.deleted_groups ) def test_update_flow_rules_sf_node_empty_next_hops_mpls(self): self._prepare_update_flow_rules_sf_node_empty_next_hops('mpls', None) self.assertEqual( [], self.executed_cmds ) self.assertEqual( [{ 'actions': 'strip_vlan, pop_mpls:0x0800,output:6', 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 65791, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( {}, self.group_mapping ) def test_update_flow_rules_sf_node_empty_next_hops_nsh(self): self._prepare_update_flow_rules_sf_node_empty_next_hops('nsh', None) self.assertEqual( [], self.executed_cmds ) self.assertEqual( [{ 'actions': 
( 'strip_vlan,move:NXM_OF_ETH_DST->OXM_OF_PKT_REG0[0..47],' 'decap(),decap(),' 'move:OXM_OF_PKT_REG0[0..47]->NXM_OF_ETH_DST,output:6'), 'dl_dst': '00:01:02:03:05:07', 'eth_type': 35151, 'dl_vlan': 0, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 255, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( {}, self.group_mapping ) def test_update_flow_rules_sf_node_empty_next_hops_no_proxy_mpls(self): self._prepare_update_flow_rules_sf_node_empty_next_hops('mpls', 'mpls') self.assertEqual( [], self.executed_cmds ) self.assertEqual( [{ 'actions': 'strip_vlan, output:6', 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 65791, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( {}, self.group_mapping ) def test_update_flow_rules_sf_node_empty_next_hops_no_proxy_nsh(self): self._prepare_update_flow_rules_sf_node_empty_next_hops('nsh', 'nsh') self.assertEqual( [], self.executed_cmds ) self.assertEqual( [{ 'actions': 'strip_vlan, output:6', 'dl_dst': '00:01:02:03:05:07', 'eth_type': 35151, 'dl_vlan': 0, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 255, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( {}, self.group_mapping ) def test_update_flow_rules_src_node_empty_next_hops(self): self.port_mapping = { '2f1d2140-42ce-4979-9542-7ef25796e536': { 'port_name': 'dst_port', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.update_flow_rules( { 'nsi': 254, 'ingress': None, 'next_hops': None, 'del_fcs': [], 'group_refcnt': 1, 'node_type': 'src_node', 'egress': u'2f1d2140-42ce-4979-9542-7ef25796e536', 'next_group_id': None, 'nsp': 256, 'add_fcs': [], 'id': uuidutils.generate_uuid(), 'fwd_path': True }, status ) self.assertEqual( [], self.executed_cmds ) self.assertEqual( [], self.added_flows ) self.assertEqual( {}, self.group_mapping ) def test_update_flow_rules_src_node_empty_next_hops_add_fcs_del_fcs_mpls( self): self._test_update_flow_rules_src_empty_next_hops_a_d('mpls') def 
test_update_flow_rules_src_node_empty_next_hops_add_fcs_del_fcs_nsh( self): self._test_update_flow_rules_src_empty_next_hops_a_d('nsh') def test_update_flow_rules_src_node_empty_next_hops_a_d_no_proxy_mpls( self): self._prepare_update_flow_rules_src_node_empty_next_hops_a_d( 'mpls', 'mpls') self.assertEqual( [{ 'actions': 'normal', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': u'10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( [{ 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': u'10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True, }], self.deleted_flows ) self.assertEqual( {}, self.group_mapping ) def test_update_flow_rules_src_node_empty_next_hops_a_d_no_proxy_nsh( self): self._prepare_update_flow_rules_src_node_empty_next_hops_a_d( 'nsh', 'nsh') self.assertEqual( [{ 'actions': 'normal', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': u'10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( [{ 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': u'10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }], self.deleted_flows ) self.assertEqual( {}, self.group_mapping ) def test_update_flow_rules_sf_node_empty_next_hops_add_fcs_del_fcs_mpls( self): self._prepare_update_flow_rules_sf_node_empty_next_hops_a_d( 'mpls', None) self.assertEqual( [], self.executed_cmds ) self.assertEqual( [{ 'actions': 'normal', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': u'10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': 'strip_vlan, pop_mpls:0x0800,output:42', 'dl_dst': 
'00:01:02:03:06:08', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 65792, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( [{ 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': u'10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True, }], self.deleted_flows ) self.assertEqual( {}, self.group_mapping ) def test_update_flow_rules_sf_node_empty_next_hops_add_fcs_del_fcs_nsh( self): self._prepare_update_flow_rules_sf_node_empty_next_hops_a_d( 'nsh', None) self.assertEqual( [], self.executed_cmds ) self.assertEqual( [{ 'actions': 'normal', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': u'10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': ( 'strip_vlan,move:NXM_OF_ETH_DST->OXM_OF_PKT_REG0[0..47],' 'decap(),decap(),' 'move:OXM_OF_PKT_REG0[0..47]->NXM_OF_ETH_DST,output:42'), 'dl_dst': '00:01:02:03:06:08', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 256, 'dl_vlan': 0, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( [{ 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': u'10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True, }], self.deleted_flows ) self.assertEqual( {}, self.group_mapping ) # this test exercises the last SF_NODE in a chain with encapsulation def test_update_flow_rules_sf_node_empty_next_hops_a_d_no_proxy_mpls(self): self._prepare_update_flow_rules_sf_node_empty_next_hops_a_d( 'mpls', 'mpls') self.assertEqual( [{ 'actions': 'pop_mpls:0x0800,normal', 'eth_type': 34887, 'in_port': 42, 'mpls_label': 65791, 'priority': 30, 'table': 0 }, { 'actions': 'strip_vlan, output:42', 'dl_dst': '00:01:02:03:06:08', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 65792, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( [{ 
'eth_type': 34887, 'in_port': 42, 'mpls_label': 65791, 'priority': 30, 'table': 0, 'strict': True, }], self.deleted_flows ) self.assertEqual( {}, self.group_mapping ) # this test exercises the last SF_NODE in a chain with encapsulation def test_update_flow_rules_sf_node_empty_next_hops_a_d_no_proxy_nsh(self): self._prepare_update_flow_rules_sf_node_empty_next_hops_a_d( 'nsh', 'nsh') self.assertEqual( [{ 'actions': 'decap(),decap(),normal', 'eth_type': 35151, 'in_port': 42, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 255, 'priority': 30, 'table': 0 }, { 'actions': 'strip_vlan, output:42', 'dl_dst': '00:01:02:03:06:08', 'eth_type': 35151, 'dl_vlan': 0, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 256, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( [{ 'eth_type': 35151, 'in_port': 42, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 255, 'priority': 30, 'table': 0, 'strict': True }], self.deleted_flows ) self.assertEqual( {}, self.group_mapping ) def test_update_flow_rules_src_node_next_hops_add_fcs_mpls(self): self._prepare_update_flow_rules_src_node_next_hops_add_fcs( 'mpls', None, None) self.assertEqual( [{ 'actions': ( 'push_mpls:0x8847,set_mpls_label:65791,' 'set_mpls_ttl:255,mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 2048, 'priority': 0, 'table': 5 }, { 'actions': 'group:1', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_src_node_next_hops_add_fcs_nsh(self): self._prepare_update_flow_rules_src_node_next_hops_add_fcs( 'nsh', None, None) self.assertEqual( [{ 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0x100->nsh_spi,set_field:0xff->nsh_si," 
"encap(ethernet)," 'mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 2048, 'priority': 0, 'table': 5 }, { 'actions': 'group:1', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_src_node_next_hops_add_fcs_no_proxy_mpls(self): self._prepare_update_flow_rules_src_node_next_hops_add_fcs( 'mpls', None, 'mpls') self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ( 'push_mpls:0x8847,set_mpls_label:65791,' 'set_mpls_ttl:255,group:1'), 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_src_node_next_hops_add_fcs_no_proxy_nsh(self): self._prepare_update_flow_rules_src_node_next_hops_add_fcs( 'nsh', None, 'nsh') self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 35151, 'priority': 0, 'table': 5 }, { 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0x100->nsh_spi,set_field:0xff->nsh_si," "encap(ethernet)," 'group:1'), 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 
'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_src_node_next_hops_same_host_add_fcs_mpls(self): self._prepare_update_flow_rules_src_node_next_hops_same_host_a( 'mpls', None) self.assertEqual( [{ 'actions': ( 'push_mpls:0x8847,set_mpls_label:65791,' 'set_mpls_ttl:255,mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 2048, 'priority': 0, 'table': 5 }, { 'actions': 'group:1', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_src_node_next_hops_same_host_add_fcs_nsh(self): self._prepare_update_flow_rules_src_node_next_hops_same_host_a( 'nsh', None) self.assertEqual( [{ 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0x100->nsh_spi,set_field:0xff->nsh_si," "encap(ethernet)," 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 2048, 'priority': 0, 'table': 5 }, { 'actions': 'group:1', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_src_node_next_hops_same_host_a_no_proxy_mpls( self): self._prepare_update_flow_rules_src_node_next_hops_same_host_a( 'mpls', 'mpls') self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ( 
'push_mpls:0x8847,set_mpls_label:65791,' 'set_mpls_ttl:255,group:1'), 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_src_node_next_hops_same_host_a_no_proxy_nsh( self): self._prepare_update_flow_rules_src_node_next_hops_same_host_a( 'nsh', 'nsh') self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 35151, 'priority': 0, 'table': 5 }, { 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0x100->nsh_spi,set_field:0xff->nsh_si," "encap(ethernet)," 'group:1'), 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_sf_node_next_hops_add_fcs_mpls(self): self._prepare_update_flow_rules_sf_node_next_hops_add_fcs( 'mpls', None, None) self.assertEqual( [{ 'actions': ( 'push_mpls:0x8847,set_mpls_label:65791,' 'set_mpls_ttl:255,mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 2048, 'priority': 0, 'table': 5 }, { 'actions': 'group:1', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': 'strip_vlan, pop_mpls:0x0800,output:6', 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 65792, 'priority': 1, 'table': 10 }], self.added_flows ) 
self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_sf_node_next_hops_add_fcs_nsh(self): self._prepare_update_flow_rules_sf_node_next_hops_add_fcs( 'nsh', None, None) self.assertEqual( [{ 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0x100->nsh_spi,set_field:0xff->nsh_si," "encap(ethernet)," 'mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 2048, 'priority': 0, 'table': 5 }, { 'actions': 'group:1', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': ( 'strip_vlan,move:NXM_OF_ETH_DST->OXM_OF_PKT_REG0[0..47],' 'decap(),decap(),' 'move:OXM_OF_PKT_REG0[0..47]->NXM_OF_ETH_DST,output:6'), 'dl_dst': '00:01:02:03:05:07', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 256, 'dl_vlan': 0, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_sf_node_next_hops_add_fcs_nh_mpls(self): self._prepare_update_flow_rules_sf_node_next_hops_add_fcs( 'mpls', None, 'mpls') self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ( 'push_mpls:0x8847,set_mpls_label:65791,' 'set_mpls_ttl:255,group:1'), 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': 'strip_vlan, pop_mpls:0x0800,output:6', 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 65792, 'priority': 1, 'table': 10 }], self.added_flows 
) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_sf_node_next_hops_add_fcs_nh_nsh(self): self._prepare_update_flow_rules_sf_node_next_hops_add_fcs( 'nsh', None, 'nsh') self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 35151, 'priority': 0, 'table': 5 }, { 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0x100->nsh_spi,set_field:0xff->nsh_si," "encap(ethernet)," 'group:1'), 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': ( 'strip_vlan,move:NXM_OF_ETH_DST->OXM_OF_PKT_REG0[0..47],' 'decap(),decap(),' 'move:OXM_OF_PKT_REG0[0..47]->NXM_OF_ETH_DST,output:6'), 'dl_dst': '00:01:02:03:05:07', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 256, 'dl_vlan': 0, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy_mpls(self): self._test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy_mpls( None) def test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy_nsh(self): self._test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy_nsh( None) def test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy_nh_mpls( self): self._test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy_mpls( 'mpls') def test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy_nh_nsh( self): self._test_update_flow_rules_sf_node_next_hops_add_fcs_no_proxy_nsh( 'nsh') def test_update_flow_rules_sf_node_next_hops_same_host_add_fcs_mpls(self): 
self._prepare_update_flow_rules_sf_node_next_hops_same_host_add_fcs( 'mpls', None, None) self.assertEqual( [{ 'actions': ( 'push_mpls:0x8847,set_mpls_label:65791,set_mpls_ttl:255,' 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 2048, 'priority': 0, 'table': 5 }, { 'actions': 'group:1', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': 'strip_vlan, pop_mpls:0x0800,output:6', 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 65792, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_sf_node_next_hops_same_host_add_fcs_nsh(self): self._prepare_update_flow_rules_sf_node_next_hops_same_host_add_fcs( 'nsh', None, None) self.assertEqual( [{ 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0x100->nsh_spi,set_field:0xff->nsh_si," "encap(ethernet)," 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 2048, 'priority': 0, 'table': 5 }, { 'actions': 'group:1', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': ( 'strip_vlan,move:NXM_OF_ETH_DST->OXM_OF_PKT_REG0[0..47],' 'decap(),decap(),' 'move:OXM_OF_PKT_REG0[0..47]->NXM_OF_ETH_DST,output:6'), 'dl_dst': '00:01:02:03:05:07', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 256, 'dl_vlan': 0, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def 
test_update_flow_rules_sf_node_next_hops_same_host_add_fcs_nh_mpls( self): self._prepare_update_flow_rules_sf_node_next_hops_same_host_add_fcs( 'mpls', None, 'mpls') self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ( 'push_mpls:0x8847,set_mpls_label:65791,' 'set_mpls_ttl:255,group:1'), 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': 'strip_vlan, pop_mpls:0x0800,output:6', 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 65792, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_sf_node_next_hops_same_host_add_fcs_nh_nsh( self): self._prepare_update_flow_rules_sf_node_next_hops_same_host_add_fcs( 'nsh', None, 'nsh') self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 35151, 'priority': 0, 'table': 5 }, { 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0x100->nsh_spi,set_field:0xff->nsh_si," "encap(ethernet)," 'group:1'), 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': ( 'strip_vlan,move:NXM_OF_ETH_DST->OXM_OF_PKT_REG0[0..47],' 'decap(),decap(),' 'move:OXM_OF_PKT_REG0[0..47]->NXM_OF_ETH_DST,output:6'), 'dl_dst': '00:01:02:03:05:07', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 256, 'dl_vlan': 0, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 
'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_sf_node_next_hops_same_h_a_no_proxy_nh_mpls( self): self._test_update_flow_rules_sf_node_next_hops_same_h_a_no_proxy_mpls( 'mpls' ) def test_update_flow_rules_sf_node_next_hops_same_h_a_no_proxy_nh_nsh( self): self._test_update_flow_rules_sf_node_next_hops_same_h_a_no_proxy_mpls( 'nsh' ) def test_update_flow_rules_sf_node_next_hops_same_h_a_no_proxy_mpls(self): self._test_update_flow_rules_sf_node_next_hops_same_h_a_no_proxy_mpls( None) def test_update_flow_rules_sf_node_next_hops_same_h_a_no_proxy_nsh(self): self._test_update_flow_rules_sf_node_next_hops_same_h_a_no_proxy_nsh( None) def test_update_flow_rules_src_node_graph_dependent_same_h_a_mpls(self): self._prepare_update_flow_rules_src_node_graph_dependent_a( 'mpls', '10.0.0.1', True) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ( 'push_mpls:0x8847,set_mpls_label:64252,' 'set_mpls_ttl:252,group:1'), 'reg0': 61640, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) self.assertEqual( [{ 'in_port': 42, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_update_flow_rules_src_node_graph_dependent_same_h_a_nsh(self): self._prepare_update_flow_rules_src_node_graph_dependent_a( 'nsh', '10.0.0.1', True) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 35151, 
# NOTE(review): graph-dependent src-node tests match on reg0 (the previous
# chain's NSP/NSI marker) instead of in_port; last-SF-node tests assert the
# load:...->NXM_NX_REG0[] handoff; "reverse" tests pass on_add=False to the
# same fixture.  Expected-flow literals kept byte-identical.
'priority': 0, 'table': 5 }, { 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0xfa->nsh_spi,set_field:0xfc->nsh_si," "encap(ethernet)," 'group:1'), 'reg0': 61640, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) self.assertEqual( [{ 'in_port': 42, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_update_flow_rules_src_node_graph_dependent_diff_h_a_mpls(self): self._prepare_update_flow_rules_src_node_graph_dependent_a( 'mpls', '10.0.0.2', True) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ( 'push_mpls:0x8847,set_mpls_label:64252,' 'set_mpls_ttl:252,group:1'), 'reg0': 61640, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) self.assertEqual( [{ 'in_port': 42, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_update_flow_rules_src_node_graph_dependent_diff_h_a_nsh(self): 
self._prepare_update_flow_rules_src_node_graph_dependent_a( 'nsh', '10.0.0.2', True) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 35151, 'priority': 0, 'table': 5 }, { 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0xfa->nsh_spi,set_field:0xfc->nsh_si," "encap(ethernet)," 'group:1'), 'reg0': 61640, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) self.assertEqual( [{ 'in_port': 42, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_update_flow_rules_lastsf_node_graph_dependency_same_h_a_mpls( self): self._prepare_update_flow_rules_lastsf_node_graph_dependency_same_h_a( 'mpls', True) self.assertEqual( [{ 'actions': 'load:0xf0c8->NXM_NX_REG0[],' 'pop_mpls:0x0800,resubmit(,0)', 'eth_type': 34887, 'in_port': 42, 'mpls_label': 61640, 'priority': 30, 'table': 0 }, { 'actions': 'strip_vlan, output:42', 'dl_dst': '00:01:02:03:06:08', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 61641, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( {}, self.group_mapping ) self.assertEqual( [{ 'eth_type': 34887, 'in_port': 42, 'mpls_label': 61640, 'priority': 30, 'table': 0, 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_update_flow_rules_lastsf_node_graph_dependency_same_h_a_nsh( self): self._prepare_update_flow_rules_lastsf_node_graph_dependency_same_h_a( 'nsh', True) self.assertEqual( [{ 'actions': 'load:0xf0c8->NXM_NX_REG0[],' 
'decap(),decap(),resubmit(,0)', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 240, 'nsh_si': 200, 'in_port': 42, 'priority': 30, 'table': 0 }, { 'actions': 'strip_vlan, output:42', 'dl_dst': '00:01:02:03:06:08', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 240, 'nsh_si': 201, 'dl_vlan': 0, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( {}, self.group_mapping ) self.assertEqual( [{ 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 240, 'nsh_si': 200, 'in_port': 42, 'priority': 30, 'table': 0, 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_update_flow_rules_src_node_graph_dependent_join_same_h_mpls(self): self._prepare_update_flow_rules_src_node_graph_dependent_join( 'mpls', '10.0.0.1', True) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ( 'push_mpls:0x8847,set_mpls_label:64252,' 'set_mpls_ttl:252,group:1'), 'reg0': 61640, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': ( 'push_mpls:0x8847,set_mpls_label:64252,' 'set_mpls_ttl:252,group:1'), 'reg0': 64100, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) self.assertEqual( [{ 'in_port': 42, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_update_flow_rules_src_node_graph_dependent_join_same_h_nsh(self): 
self._prepare_update_flow_rules_src_node_graph_dependent_join( 'nsh', '10.0.0.1', True) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 35151, 'priority': 0, 'table': 5 }, { 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0xfa->nsh_spi,set_field:0xfc->nsh_si," "encap(ethernet)," 'group:1'), 'reg0': 61640, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0xfa->nsh_spi,set_field:0xfc->nsh_si," "encap(ethernet)," 'group:1'), 'reg0': 64100, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) self.assertEqual( [{ 'in_port': 42, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_update_flow_rules_sf_node_many_hops_all_no_proxy_mpls(self): self._prepare_update_flow_rules_sf_node_many_hops_all_encap( 'mpls', 'mpls', 'mpls') self._assert_update_flow_rules_sf_node_many_hops_no_proxy_mpls() def test_update_flow_rules_sf_node_many_hops_all_no_proxy_nsh(self): self._prepare_update_flow_rules_sf_node_many_hops_all_encap( 'nsh', 'nsh', 'nsh') self._assert_update_flow_rules_sf_node_many_hops_no_proxy_nsh() def test_update_flow_rules_sf_node_many_hops_all_mpls(self): self._prepare_update_flow_rules_sf_node_many_hops_all_encap( 'mpls', None, 'mpls') self.assertEqual( [{ 'actions': ( 
'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:ab:cd', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ( 'push_mpls:0x8847,set_mpls_label:65791,' 'set_mpls_ttl:255,group:1'), 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': 'strip_vlan, pop_mpls:0x0800,output:6', 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 65792, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5),' 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:ab:cd, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_sf_node_many_hops_all_nsh(self): self._prepare_update_flow_rules_sf_node_many_hops_all_encap( 'nsh', None, 'nsh') self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 35151, 'priority': 0, 'table': 5 }, { 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:ab:cd', 'eth_type': 35151, 'priority': 0, 'table': 5 }, { 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0x100->nsh_spi,set_field:0xff->nsh_si," "encap(ethernet)," 'group:1'), 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'actions': ( 'strip_vlan,move:NXM_OF_ETH_DST->OXM_OF_PKT_REG0[0..47],' 'decap(),decap(),' 'move:OXM_OF_PKT_REG0[0..47]->NXM_OF_ETH_DST,output:6'), 'dl_dst': '00:01:02:03:05:07', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 256, 'dl_vlan': 0, 'priority': 1, 'table': 10 }], self.added_flows ) 
self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5),' 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:ab:cd, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) def test_reverse_ufr_src_node_graph_dependent_same_h_a_mpls(self): self._prepare_update_flow_rules_src_node_graph_dependent_a( 'mpls', '10.0.0.1', False) # notice on_add=False self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ( 'push_mpls:0x8847,set_mpls_label:64252,' 'set_mpls_ttl:252,group:1'), 'in_port': 42, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) self.assertEqual( [{ 'reg0': 61640, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_reverse_ufr_src_node_graph_dependent_same_h_a_nsh(self): self._prepare_update_flow_rules_src_node_graph_dependent_a( 'nsh', '10.0.0.1', False) # notice on_add=False self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 35151, 'priority': 0, 'table': 5 }, { 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0xfa->nsh_spi,set_field:0xfc->nsh_si," "encap(ethernet)," 'group:1'), 'in_port': 42, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 
'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) self.assertEqual( [{ 'reg0': 61640, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_reverse_ufr_lastsf_node_graph_dependency_same_h_a_mpls(self): # notice branch_point=False, which but could missing too, like in # test_update_flow_rules_sf_node_empty_next_hops_a_d_no_proxy() self._prepare_update_flow_rules_lastsf_node_graph_dependency_same_h_a( 'mpls', False) self.assertEqual( [{ 'actions': 'pop_mpls:0x0800,normal', 'eth_type': 34887, 'in_port': 42, 'mpls_label': 61640, 'priority': 30, 'table': 0 }, { 'actions': 'strip_vlan, output:42', 'dl_dst': '00:01:02:03:06:08', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 61641, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( {}, self.group_mapping ) self.assertEqual( [{ 'eth_type': 34887, 'in_port': 42, 'mpls_label': 61640, 'priority': 30, 'table': 0, 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_reverse_ufr_lastsf_node_graph_dependency_same_h_a_nsh(self): # notice branch_point=False, which but could missing too, like in # test_update_flow_rules_sf_node_empty_next_hops_a_d_no_proxy() self._prepare_update_flow_rules_lastsf_node_graph_dependency_same_h_a( 'nsh', False) self.assertEqual( [{ 'actions': 'decap(),decap(),normal', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 240, 'nsh_si': 200, 'in_port': 42, 'priority': 30, 'table': 0 }, { 'actions': 'strip_vlan, output:42', 'dl_dst': '00:01:02:03:06:08', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 240, 'nsh_si': 201, 'dl_vlan': 0, 'priority': 1, 'table': 10 }], self.added_flows ) self.assertEqual( {}, self.group_mapping ) self.assertEqual( [{ 'eth_type': 35151, 
'nsh_mdtype': 1, 'nsh_spi': 240, 'nsh_si': 200, 'in_port': 42, 'priority': 30, 'table': 0, 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_reverse_ufr_src_node_graph_dependent_diff_h_a_mpls(self): self._prepare_update_flow_rules_src_node_graph_dependent_a( 'mpls', '10.0.0.2', False) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ( 'push_mpls:0x8847,set_mpls_label:64252,' 'set_mpls_ttl:252,group:1'), 'in_port': 42, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) self.assertEqual( [{ 'reg0': 61640, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_reverse_ufr_src_node_graph_dependent_diff_h_a_nsh(self): self._prepare_update_flow_rules_src_node_graph_dependent_a( 'nsh', '10.0.0.2', False) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 35151, 'priority': 0, 'table': 5 }, { 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0xfa->nsh_spi,set_field:0xfc->nsh_si," "encap(ethernet)," 'group:1'), 'in_port': 42, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, 
self.group_mapping ) self.assertEqual( [{ 'reg0': 61640, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_reverse_ufr_src_node_graph_dependent_join_same_h_mpls(self): self._prepare_update_flow_rules_src_node_graph_dependent_join( 'mpls', '10.0.0.1', False) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ( 'push_mpls:0x8847,set_mpls_label:64252,' 'set_mpls_ttl:252,group:1'), 'in_port': 42, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) self.assertEqual( [{ 'reg0': 61640, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }, { 'reg0': 64100, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_reverse_ufr_src_node_graph_dependent_join_same_h_nsh(self): self._prepare_update_flow_rules_src_node_graph_dependent_join( 'nsh', '10.0.0.1', False) self.assertEqual( [{ 'actions': ( 'mod_vlan_vid:0,,resubmit(,10)'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 35151, 'priority': 0, 'table': 5 }, { 'actions': ( "encap(nsh,prop(class=nsh,type=md_type,val=1))," "set_field:0xfa->nsh_spi,set_field:0xfc->nsh_si," "encap(ethernet)," 
'group:1'), 'in_port': 42, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 1: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 1, 'type': 'select' } }, self.group_mapping ) self.assertEqual( [{ 'reg0': 61640, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }, { 'reg0': 64100, 'eth_type': 2048, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_delete_flow_rules_sf_node_empty_del_fcs_mpls(self): self._prepare_delete_flow_rules_sf_node_empty_del_fcs('mpls', None) self.assertEqual( [{ 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'mpls_label': 65791, 'table': 10 }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_delete_flow_rules_sf_node_empty_del_fcs_nsh(self): self._prepare_delete_flow_rules_sf_node_empty_del_fcs('nsh', None) self.assertEqual( [{ 'dl_dst': '00:01:02:03:05:07', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 255, 'table': 10 }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_delete_flow_rules_sf_node_empty_del_fcs_no_proxy_mpls(self): self._prepare_delete_flow_rules_sf_node_empty_del_fcs('mpls', 'mpls') self.assertEqual( [{ 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'mpls_label': 65791, 'table': 10 }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_delete_flow_rules_sf_node_empty_del_fcs_no_proxy_nsh(self): self._prepare_delete_flow_rules_sf_node_empty_del_fcs('nsh', 'nsh') self.assertEqual( [{ 'dl_dst': '00:01:02:03:05:07', 
'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 255, 'table': 10 }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_delete_flow_rules_src_node_empty_del_fcs_mpls(self): self._test_delete_flow_rules_src_node_empty_del_fcs('mpls', None) def test_delete_flow_rules_src_node_empty_del_fcs_nsh(self): self._test_delete_flow_rules_src_node_empty_del_fcs('nsh', None) def test_delete_flow_rules_src_node_empty_del_fcs_no_proxy_mpls(self): self._test_delete_flow_rules_src_node_empty_del_fcs('mpls', 'mpls') def test_delete_flow_rules_src_node_empty_del_fcs_no_proxy_nsh(self): self._test_delete_flow_rules_src_node_empty_del_fcs('nsh', 'nsh') def test_delete_flow_rules_sf_node_del_fcs_mpls(self): self._prepare_delete_flow_rules_sf_node_del_fcs('mpls', None) self.assertEqual( [{ 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True, }, { 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 'mpls_label': 65791, 'table': 10 }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_delete_flow_rules_sf_node_del_fcs_nsh(self): self._prepare_delete_flow_rules_sf_node_del_fcs('nsh', None) self.assertEqual( [{ 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }, { 'dl_dst': '00:01:02:03:05:07', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 255, 'table': 10 }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_delete_flow_rules_sf_node_del_fcs_no_proxy_mpls(self): self._prepare_delete_flow_rules_sf_node_del_fcs('mpls', 'mpls') self.assertEqual( [{ 'eth_type': 34887, 'in_port': 42, 'mpls_label': 65790, 'priority': 30, 'table': 0, 'strict': True, }, { 'dl_dst': '00:01:02:03:05:07', 'eth_type': 34887, 
'mpls_label': 65791, 'table': 10 }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_delete_flow_rules_sf_node_del_fcs_no_proxy_nsh(self): self._prepare_delete_flow_rules_sf_node_del_fcs('nsh', 'nsh') self.assertEqual( [{ 'eth_type': 35151, 'in_port': 42, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 254, 'priority': 30, 'table': 0, 'strict': True }, { 'dl_dst': '00:01:02:03:05:07', 'eth_type': 35151, 'nsh_mdtype': 1, 'nsh_spi': 256, 'nsh_si': 255, 'table': 10 }], self.deleted_flows ) self.assertEqual( [], self.deleted_groups ) def test_delete_flow_rules_src_node_del_fcs_mpls(self): self._test_delete_flow_rules_src_node_del_fcs('mpls', None) def test_delete_flow_rules_src_node_del_fcs_nsh(self): self._test_delete_flow_rules_src_node_del_fcs('nsh', None) def test_delete_flow_rules_src_node_del_fcs_no_proxy_mpls(self): self._test_delete_flow_rules_src_node_del_fcs('mpls', 'mpls') def test_delete_flow_rules_src_node_del_fcs_no_proxy_nsh(self): self._test_delete_flow_rules_src_node_del_fcs('nsh', 'nsh') def test_delete_flow_rules_src_node_next_hops_del_fcs_mpls(self): self._test_delete_flow_rules_src_node_next_hops_del_fcs( 'mpls', None, None) def test_delete_flow_rules_src_node_next_hops_del_fcs_nsh(self): self._test_delete_flow_rules_src_node_next_hops_del_fcs( 'nsh', None, None) def test_delete_flow_rules_src_node_next_hops_del_fcs_no_proxy_mpls(self): self._test_delete_flow_rules_src_node_next_hops_del_fcs('mpls', 'mpls', None) def test_delete_flow_rules_src_node_next_hops_del_fcs_no_proxy_nsh(self): self._test_delete_flow_rules_src_node_next_hops_del_fcs('nsh', 'nsh', None) def test_delete_flow_rules_src_node_next_hops_del_fcs_nh_mpls(self): self._test_delete_flow_rules_src_node_next_hops_del_fcs('mpls', None, 'mpls') def test_delete_flow_rules_src_node_next_hops_del_fcs_nh_nsh(self): self._test_delete_flow_rules_src_node_next_hops_del_fcs('nsh', None, 'nsh') def test_delete_flow_rules_src_node_next_hops_del_fcs_no_proxy_nh_mpls( self): 
self._test_delete_flow_rules_src_node_next_hops_del_fcs( 'mpls', 'mpls', 'mpls') def test_delete_flow_rules_src_node_next_hops_del_fcs_no_proxy_nh_nsh( self): self._test_delete_flow_rules_src_node_next_hops_del_fcs( 'nsh', 'nsh', 'nsh') def test_delete_flow_rules_sf_node_next_hops_del_fcs_mpls(self): self._test_delete_flow_rules_sf_node_next_hops_del_fcs_mpls(None) def test_delete_flow_rules_sf_node_next_hops_del_fcs_nsh(self): self._test_delete_flow_rules_sf_node_next_hops_del_fcs_nsh(None) def test_delete_flow_rules_sf_node_next_hops_del_fcs_nh_mpls(self): self._test_delete_flow_rules_sf_node_next_hops_del_fcs_mpls('mpls') def test_delete_flow_rules_sf_node_next_hops_del_fcs_nh_nsh(self): self._test_delete_flow_rules_sf_node_next_hops_del_fcs_nsh('nsh') def test_delete_flow_rules_sf_node_next_hops_del_fcs_no_proxy_mpls(self): self._test_delete_flow_rules_sf_node_next_hops_del_fcs_no_proxy_mpls( None) def test_delete_flow_rules_sf_node_next_hops_del_fcs_no_proxy_nsh(self): self._test_delete_flow_rules_sf_node_next_hops_del_fcs_no_proxy_nsh( None) def test_delete_flow_rules_sf_node_next_hops_del_fcs_no_proxy_nh_mpls( self): self._test_delete_flow_rules_sf_node_next_hops_del_fcs_no_proxy_mpls( 'mpls') def test_delete_flow_rules_sf_node_next_hops_del_fcs_no_proxy_nh_nsh( self): self._test_delete_flow_rules_sf_node_next_hops_del_fcs_no_proxy_nsh( 'nsh') def test_init_agent_empty_flowrules(self): # in setUp we call _clear_local_entries() so whatever was done # during initialize() is lost ; here, we really want to check the # _clear_sfc_flow_on_int_br done at initialize self.sfc_driver._clear_sfc_flow_on_int_br() self.assertEqual( [{ 'eth_type': 34887, 'instructions': [ self.ofpp.OFPInstructionGotoTable(table_id=10)], 'match': None, 'priority': 20, 'table_id': 0 }, { 'eth_type': 35151, 'instructions': [ self.ofpp.OFPInstructionGotoTable(table_id=10)], 'match': None, 'priority': 20, 'table_id': 0 }, { 'instructions': [], 'match': None, 'priority': 0, 'table_id': 10 }], 
self.installed_instructions ) self.assertEqual( ["all"], self.deleted_groups ) self.assertEqual({}, self.group_mapping) def _prepare_update_flow_rules_src_node_next_hops_tap_sf_add_fcs( self, pc_corr, pp_corr_nh): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 'port_name': 'port2', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.update_flow_rules( { 'nsi': 253, 'ingress': None, 'next_hops': [{ 'local_endpoint': '10.0.0.2', 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pp_corr_nh, 'tap_enabled': True, 'nsi': 253, # 'nsi' of TAP node 'nsp': 256, 'tap_nh_node_type': 'sf_node', 'pp_corr_tap_nh': None }], 'del_fcs': [], 'group_refcnt': 1, 'node_type': 'src_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': 1, 'nsp': 256, 'add_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': None, 'segment_id': 33 }, status ) self.assertEqual( [], self.executed_cmds ) def test_update_flow_rules_src_node_next_hop_tap_sf_add_fcs(self): self._prepare_update_flow_rules_src_node_next_hops_tap_sf_add_fcs( 'mpls', None) self.assertEqual( [{ 'actions': ('push_mpls:0x8847,set_mpls_label:65789,' 'set_mpls_ttl:253,mod_vlan_vid:0,output:2'), 'in_port': 42, 'dl_src': '00:01:02:03:06:08', 'eth_type': 2048, 'priority': 0, 'table': 7 }, { 'actions': 'resubmit(,25)', 'dl_src': '00:01:02:03:06:08', 
'eth_type': 34887, 'in_port': 1, 'mpls_label': 65789, 'priority': 30, 'table': 0 }, { 'actions': 'strip_vlan,load:0x21->NXM_NX_TUN_ID[],' 'output:77,output:88', 'dl_src': '00:01:02:03:06:08', 'eth_type': 34887, 'in_port': 1, 'mpls_label': 65789, 'priority': 0, 'table': 25 }, { 'actions': 'group:1,resubmit(,7)', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) def _prepare_update_ingress_flow_tap_sf(self, pc_corr, pp_corr_nh): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', } } status = [] self.sfc_driver.update_flow_rules( { 'nsi': 253, 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'egress': None, 'mac_address': '00:01:02:03:05:07', 'node_type': 'sf_node', 'next_group_id': 1, 'nsp': 256, 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': None, 'tap_enabled': True }, status ) self.assertEqual( [], self.executed_cmds ) def test_update_ingress_flow_rule_tap_sf(self): self._prepare_update_ingress_flow_tap_sf('mpls', None) self.assertEqual( [{ 'actions': 'strip_vlan, pop_mpls:0x8847,output:6', 'dl_src': '00:01:02:03:05:07', 'eth_type': 34887, 'dl_vlan': 0, 'mpls_label': 65789, 'priority': 1, 'table': 10 }], self.added_flows ) def _prepare_delete_ingress_flow_tap_sf(self, pc_corr, pp_corr_nh): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', } } status = [] self.sfc_driver.delete_flow_rule( { 'nsi': 253, 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'egress': None, 'mac_address': '00:01:02:03:05:07', 'node_type': 'sf_node', 'next_group_id': 1, 'nsp': 256, 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': None, 'tap_enabled': True }, status ) self.assertEqual( [], self.executed_cmds ) 
def test_delete_ingress_flow_rule_tap_sf(self): self._prepare_delete_ingress_flow_tap_sf('mpls', None) self.assertEqual( [{ 'dl_src': '00:01:02:03:05:07', 'eth_type': 34887, 'mpls_label': 65789, 'table': 10 }], self.deleted_flows ) def _prepare_update_flow_rules_tap_node_next_hop_default_sf_add_fcs( self, pc_corr, pp_corr_nh): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 'port_name': 'port2', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.update_flow_rules( { 'nsi': 255, 'ingress': None, 'next_hops': [{ 'local_endpoint': '10.0.0.2', 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pp_corr_nh, }], 'del_fcs': [], 'group_refcnt': 0, 'node_type': 'src_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': 2, 'nsp': 256, 'add_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': None, 'skip_ingress_flow_config': True, 'segment_id': 33, 'tap_enabled': True }, status ) self.assertEqual( [], self.executed_cmds ) def test_update_flow_rules_tap_node_next_hop_default_sf_add_fcs( self): self._prepare_update_flow_rules_tap_node_next_hop_default_sf_add_fcs( 'mpls', None ) self.assertEqual([{ 'actions': ('push_mpls:0x8847,set_mpls_label:65791,' 'set_mpls_ttl:255,mod_vlan_vid:0,,output:2'), 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 2048, 'priority': 0, 'table': 5 }, { 'actions': 
'group:2,resubmit(,7)', 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 2: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 2, 'type': 'select' } }, self.group_mapping ) def test_update_flow_rules_tap_node_next_hop_default_sf_mpls_add_fcs( self): # SRC -> TAP -> DEFAULT_SF(MPLS) -> DST self._prepare_update_flow_rules_tap_node_next_hop_default_sf_add_fcs( 'mpls', 'mpls' ) self.assertEqual([{ 'actions': 'mod_vlan_vid:0,,output:2', 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ('push_mpls:0x8847,set_mpls_label:65791,' 'set_mpls_ttl:255,group:2,resubmit(,7)'), 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 2: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 2, 'type': 'select' } }, self.group_mapping ) def _prep_flow_def_sf_nxt_hop_tap_node_nxt_hop_def_sf_mpls_add_fcs( self, pc_corr, pp_corr_nh): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 'port_name': 'port2', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.update_flow_rules( { 'nsi': 254, 'ingress': None, 'next_hops': [{ 'local_endpoint': '10.0.0.2', 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pp_corr_nh, }], 'del_fcs': [], 'group_refcnt': 0, 'node_type': 
'src_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': 2, 'nsp': 256, 'add_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': None, 'skip_ingress_flow_config': True, 'segment_id': 33, 'tap_enabled': True }, status ) self.assertEqual( [], self.executed_cmds ) def test_update_flows_def_sf_nxt_hop_tap_node_nxt_hop_def_sf_mpls_add_fcs( self): # SRC -> DEFAULT_SF -> TAP -> DEFAULT_SF(MPLS) -> DST self._prep_flow_def_sf_nxt_hop_tap_node_nxt_hop_def_sf_mpls_add_fcs( 'mpls', 'mpls' ) self.assertEqual([{ 'actions': 'mod_vlan_vid:0,,output:2', 'dl_dst': '12:34:56:78:cf:23', 'eth_type': 34887, 'priority': 0, 'table': 5 }, { 'actions': ('push_mpls:0x8847,set_mpls_label:65790,' 'set_mpls_ttl:254,group:2,resubmit(,7)'), 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }], self.added_flows ) self.assertEqual( { 2: { 'buckets': ( 'bucket=weight=1, ' 'mod_dl_dst:12:34:56:78:cf:23, ' 'resubmit(,5)' ), 'group_id': 2, 'type': 'select' } }, self.group_mapping ) def _prepare_delete_flow_rules_src_node_next_hops_tap_sf_del_fcs( self, pc_corr, pp_corr_nh): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 'port_name': 'port2', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.delete_flow_rule( { 'nsi': 255, 'ingress': None, 'next_hops': [{ 'local_endpoint': '10.0.0.2', 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 'weight': 1, 'net_uuid': 
'8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pp_corr_nh, 'tap_enabled': True, 'nsi': 253, 'nsp': 256 }], 'add_fcs': [], 'group_refcnt': 1, 'node_type': 'src_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': 1, 'nsp': 256, 'del_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': None, 'segment_id': 33 }, status ) self.assertEqual( [], self.executed_cmds ) def test_delete_flow_rules_src_node_next_hops_tap_sf_del_fcs(self): self._prepare_delete_flow_rules_src_node_next_hops_tap_sf_del_fcs( 'mpls', None) self.assertEqual([ { 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff', 'strict': True }, { 'dl_src': '00:01:02:03:06:08', 'table': 7 }, { 'dl_src': '00:01:02:03:06:08', 'eth_type': 34887, 'in_port': 1, 'mpls_label': 65789, 'table': 0 }, { 'dl_src': '00:01:02:03:06:08', 'eth_type': 34887, 'in_port': 1, 'mpls_label': 65789, 'table': 25 }], self.deleted_flows ) def _prepare_delete_flow_rules_tap_node_next_hop_default_sf_del_fcs( self, pc_corr, pp_corr_nh): self.port_mapping = { '8768d2b3-746d-4868-ae0e-e81861c2b4e6': { 'port_name': 'port1', 'ofport': 6, 'vif_mac': '00:01:02:03:05:07', }, '29e38fb2-a643-43b1-baa8-a86596461cd5': { 'port_name': 'port2', 'ofport': 42, 'vif_mac': '00:01:02:03:06:08', } } status = [] self.sfc_driver.delete_flow_rule( { 'nsi': 254, 'ingress': None, 'next_hops': [{ 'local_endpoint': '10.0.0.2', 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6', 
'weight': 1, 'net_uuid': '8768d2b3-746d-4868-ae0e-e81861c2b4e7', 'network_type': 'vxlan', 'segment_id': 33, 'gw_mac': '00:01:02:03:06:09', 'cidr': '10.0.0.0/8', 'in_mac_address': '12:34:56:78:cf:23', 'pp_corr': pp_corr_nh, }], 'add_fcs': [], 'group_refcnt': 0, 'node_type': 'src_node', 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5', 'next_group_id': 2, 'nsp': 256, 'del_fcs': [{ 'source_port_range_min': 100, 'destination_ip_prefix': u'10.200.0.0/16', 'protocol': u'tcp', 'l7_parameters': {}, 'source_port_range_max': 100, 'source_ip_prefix': '10.100.0.0/16', 'destination_port_range_min': 100, 'ethertype': 'IPv4', 'destination_port_range_max': 100, }], 'id': uuidutils.generate_uuid(), 'fwd_path': True, 'pc_corr': pc_corr, 'pp_corr': None, 'skip_ingress_flow_config': True, 'segment_id': 33, 'tap_enabled': True }, status ) self.assertEqual( [], self.executed_cmds ) def test_delete_flow_rules_tap_node_next_hop_default_sf_del_fcs(self): self._prepare_delete_flow_rules_tap_node_next_hop_default_sf_del_fcs( 'mpls', None ) self.assertEqual([ { 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'strict': True, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'dl_dst': '12:34:56:78:cf:23', 'table': 5 }], self.deleted_flows) self.assertEqual( [2], self.deleted_groups ) def test_delete_flow_rules_tap_node_next_hop_default_sf_mpls_del_fcs(self): self._prepare_delete_flow_rules_tap_node_next_hop_default_sf_del_fcs( 'mpls', 'mpls' ) self.assertEqual([ { 'eth_type': 2048, 'in_port': 42, 'nw_dst': u'10.200.0.0/16', 'nw_proto': 6, 'nw_src': '10.100.0.0/16', 'priority': 30, 'strict': True, 'table': 0, 'tp_dst': '0x64/0xffff', 'tp_src': '0x64/0xffff' }, { 'dl_dst': '12:34:56:78:cf:23', 'table': 5 }], self.deleted_flows) self.assertEqual( [2], self.deleted_groups ) networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/agent/extensions/test_sfc.py0000664000175000017500000000423613656750333032014 
0ustar zuulzuul00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from neutron.plugins.ml2.drivers.openvswitch.agent import ( ovs_agent_extension_api as ovs_ext_api) from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native import ( ovs_bridge) from neutron.tests import base from neutron_lib import context from networking_sfc.services.sfc.agent.extensions import sfc class SfcAgentExtensionTestCase(base.BaseTestCase): def setUp(self): super(SfcAgentExtensionTestCase, self).setUp() conn_patcher = mock.patch('neutron.agent.ovsdb.impl_idl._connection') conn_patcher.start() self.addCleanup(conn_patcher.stop) self.sfc_ext = sfc.SfcAgentExtension() self.context = context.get_admin_context() self.connection = mock.Mock() os_ken_app = mock.Mock() self.agent_api = ovs_ext_api.OVSAgentExtensionAPI( ovs_bridge.OVSAgentBridge('br-int', os_ken_app=os_ken_app), ovs_bridge.OVSAgentBridge('br-tun', os_ken_app=os_ken_app)) self.sfc_ext.consume_api(self.agent_api) # Don't rely on used driver mock.patch( 'neutron.manager.NeutronManager.load_class_for_provider', return_value=lambda: mock.Mock(spec=sfc.SfcAgentDriver) ).start() self.sfc_ext.initialize( self.connection, constants.EXTENSION_DRIVER_TYPE) def test_update_empty_flow_rules(self): self.sfc_ext.update_flow_rules(self.context, flowrule_entries={}) 
self.assertFalse(self.sfc_ext.sfc_driver.update_flow_rules.called) networking-sfc-10.0.0/networking_sfc/tests/unit/services/sfc/test_plugin.py0000664000175000017500000013255413656750333027247 0ustar zuulzuul00000000000000# Copyright 2017 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock from networking_sfc.services.sfc.common import context as sfc_ctx from networking_sfc.services.sfc.common import exceptions as sfc_exc from networking_sfc.tests.unit.db import test_sfc_db SFC_PLUGIN_KLASS = ( "networking_sfc.services.sfc.plugin.SfcPlugin" ) class SfcPluginTestCase(test_sfc_db.SfcDbPluginTestCase): def setUp(self, core_plugin=None, sfc_plugin=None, ext_mgr=None): if not sfc_plugin: sfc_plugin = SFC_PLUGIN_KLASS self.driver_manager_p = mock.patch( 'networking_sfc.services.sfc.driver_manager.SfcDriverManager' ) self.fake_driver_manager_class = self.driver_manager_p.start() self.fake_driver_manager = mock.Mock() self.fake_driver_manager_class.return_value = self.fake_driver_manager self.plugin_context = None self.plugin_context_precommit = None self.plugin_context_postcommit = None super(SfcPluginTestCase, self).setUp( core_plugin=core_plugin, sfc_plugin=sfc_plugin, ext_mgr=ext_mgr ) def _record_context(self, plugin_context): self.plugin_context = plugin_context def _record_context_precommit(self, plugin_context): self.plugin_context_precommit = plugin_context def _record_context_postcommit(self, plugin_context): 
self.plugin_context_postcommit = plugin_context def test_create_port_chain_driver_manager_called(self): self.fake_driver_manager.create_port_chain_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.create_port_chain_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port_pair_group(port_pair_group={}) as pg: with self.port_chain(port_chain={ 'port_pair_groups': [pg['port_pair_group']['id']] }) as pc: driver_manager = self.fake_driver_manager (driver_manager.create_port_chain_precommit .assert_called_once_with(mock.ANY)) (driver_manager.create_port_chain_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context_precommit, sfc_ctx.PortChainContext ) self.assertIsInstance( self.plugin_context_postcommit, sfc_ctx.PortChainContext ) self.assertIn('port_chain', pc) self.assertEqual( self.plugin_context_precommit.current, pc['port_chain']) self.assertEqual( self.plugin_context_postcommit.current, pc['port_chain']) def test_create_port_chain_precommit_driver_manager_exception(self): self.fake_driver_manager.create_port_chain_precommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='create_port_chain_precommit' ) ) with self.port_pair_group(port_pair_group={}) as pg: self._create_port_chain( self.fmt, {'port_pair_groups': [pg['port_pair_group']['id']]}, expected_res_status=500) self._test_list_resources('port_chain', []) (self.fake_driver_manager.create_port_chain_postcommit .assert_not_called()) self.fake_driver_manager.delete_port_chain.assert_not_called() def test_create_port_chain_postcommit_driver_manager_exception(self): self.fake_driver_manager.create_port_chain_postcommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='create_port_chain_postcommit' ) ) with self.port_pair_group(port_pair_group={}) as pg: self._create_port_chain( self.fmt, {'port_pair_groups': [pg['port_pair_group']['id']]}, expected_res_status=500) 
self._test_list_resources('port_chain', []) (self.fake_driver_manager.create_port_chain_precommit .assert_called_once_with(mock.ANY)) self.fake_driver_manager.delete_port_chain.assert_called_once_with( mock.ANY ) def test_update_port_chain_driver_manager_called(self): self.fake_driver_manager.update_port_chain_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.update_port_chain_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port_pair_group(port_pair_group={}) as pg: with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']] }) as pc: req = self.new_update_request( 'port_chains', {'port_chain': {'name': 'test2'}}, pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) driver_manager = self.fake_driver_manager (driver_manager.update_port_chain_precommit .assert_called_once_with(mock.ANY)) (driver_manager.update_port_chain_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context_precommit, sfc_ctx.PortChainContext ) self.assertIsInstance( self.plugin_context_postcommit, sfc_ctx.PortChainContext ) self.assertIn('port_chain', pc) self.assertIn('port_chain', res) self.assertEqual( self.plugin_context_precommit.current, res['port_chain']) self.assertEqual( self.plugin_context_postcommit.current, res['port_chain']) self.assertEqual( self.plugin_context_precommit.original, pc['port_chain']) self.assertEqual( self.plugin_context_postcommit.original, pc['port_chain']) def _test_update_port_chain_driver_manager_exception(self, updated): with self.port_pair_group(port_pair_group={}) as pg: with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']] }) as pc: self.assertIn('port_chain', pc) original_port_chain = pc['port_chain'] req = self.new_update_request( 'port_chains', {'port_chain': {'name': 'test2'}}, pc['port_chain']['id'] ) updated_port_chain = 
copy.copy(original_port_chain) if updated: updated_port_chain['name'] = 'test2' res = req.get_response(self.ext_api) self.assertEqual(500, res.status_int) res = self._list('port_chains') self.assertIn('port_chains', res) self.assertItemsEqual( res['port_chains'], [updated_port_chain]) def test_update_port_chain_precommit_driver_manager_exception(self): self.fake_driver_manager.update_port_chain_precommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='update_port_chain_precommit' ) ) self._test_update_port_chain_driver_manager_exception(False) def test_update_port_chain_postcommit_driver_manager_exception(self): self.fake_driver_manager.update_port_chain_postcommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='update_port_chain_postcommit' ) ) self._test_update_port_chain_driver_manager_exception(True) def test_delete_port_chain_manager_called(self): self.fake_driver_manager.delete_port_chain = mock.Mock( side_effect=self._record_context) self.fake_driver_manager.delete_port_chain_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.delete_port_chain_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port_pair_group(port_pair_group={}) as pg: with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']] }, do_delete=False) as pc: req = self.new_delete_request( 'port_chains', pc['port_chain']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(204, res.status_int) driver_manager = self.fake_driver_manager (driver_manager.delete_port_chain .assert_called_once_with(mock.ANY)) (driver_manager.delete_port_chain_precommit .assert_called_once_with(mock.ANY)) (driver_manager.delete_port_chain_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context, sfc_ctx.PortChainContext ) self.assertIsInstance( self.plugin_context_precommit, sfc_ctx.PortChainContext ) self.assertIsInstance( 
self.plugin_context_postcommit, sfc_ctx.PortChainContext ) self.assertIn('port_chain', pc) self.assertEqual(self.plugin_context.current, pc['port_chain']) self.assertEqual(self.plugin_context_precommit.current, pc['port_chain']) self.assertEqual(self.plugin_context_postcommit.current, pc['port_chain']) def _test_delete_port_chain_driver_manager_exception(self): with self.port_pair_group(port_pair_group={ }, do_delete=False) as pg: with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg['port_pair_group']['id']] }, do_delete=False) as pc: req = self.new_delete_request( 'port_chains', pc['port_chain']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(500, res.status_int) self._test_list_resources('port_chain', [pc]) def test_delete_port_chain_driver_manager_exception(self): self.fake_driver_manager.delete_port_chain = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='delete_port_chain' ) ) self._test_delete_port_chain_driver_manager_exception() def test_delete_port_chain_driver_precommit_manager_exception(self): self.fake_driver_manager.delete_port_chain_precommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='delete_port_chain_precommit' ) ) self._test_delete_port_chain_driver_manager_exception() def test_create_port_pair_group_driver_manager_called(self): self.fake_driver_manager.create_port_pair_group_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.create_port_pair_group_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port_pair_group(port_pair_group={}) as pc: fake_driver_manager = self.fake_driver_manager (fake_driver_manager.create_port_pair_group_precommit .assert_called_once_with(mock.ANY)) (fake_driver_manager.create_port_pair_group_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context_precommit, sfc_ctx.PortPairGroupContext ) self.assertIsInstance( self.plugin_context_postcommit, 
sfc_ctx.PortPairGroupContext ) self.assertIn('port_pair_group', pc) self.assertEqual( self.plugin_context_precommit.current, pc['port_pair_group']) self.assertEqual( self.plugin_context_postcommit.current, pc['port_pair_group']) def test_create_port_pair_group_precommit_driver_manager_exception(self): self.fake_driver_manager.create_port_pair_group_precommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='create_port_pair_group_precommit' ) ) self._create_port_pair_group(self.fmt, {}, expected_res_status=500) self._test_list_resources('port_pair_group', []) driver_manager = self.fake_driver_manager (driver_manager.create_port_pair_group_precommit .assert_called_once_with(mock.ANY)) driver_manager.create_port_pair_group_postcommit.assert_not_called() driver_manager.delete_port_pair_group.assert_not_called() driver_manager.delete_port_pair_group_precommit.assert_not_called() driver_manager.delete_port_pair_group_postcommit.assert_not_called() def test_create_port_pair_group_postcommit_driver_manager_exception(self): self.fake_driver_manager.create_port_pair_group_postcommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='create_port_pair_group_postcommit' ) ) self._create_port_pair_group(self.fmt, {}, expected_res_status=500) self._test_list_resources('port_pair_group', []) driver_manager = self.fake_driver_manager (driver_manager.create_port_pair_group_precommit .assert_called_once_with(mock.ANY)) (driver_manager.delete_port_pair_group .assert_called_once_with(mock.ANY)) (driver_manager.delete_port_pair_group_precommit .assert_called_once_with(mock.ANY)) (driver_manager.delete_port_pair_group_postcommit .assert_called_once_with(mock.ANY)) def test_update_port_pair_group_driver_manager_called(self): self.fake_driver_manager.update_port_pair_group_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.update_port_pair_group_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with 
self.port_pair_group(port_pair_group={ 'name': 'test1' }) as pc: req = self.new_update_request( 'port_pair_groups', {'port_pair_group': {'name': 'test2'}}, pc['port_pair_group']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) driver_manager = self.fake_driver_manager (driver_manager.update_port_pair_group_precommit .assert_called_once_with(mock.ANY)) (driver_manager.update_port_pair_group_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context_precommit, sfc_ctx.PortPairGroupContext ) self.assertIsInstance( self.plugin_context_postcommit, sfc_ctx.PortPairGroupContext ) self.assertIn('port_pair_group', pc) self.assertIn('port_pair_group', res) self.assertEqual( self.plugin_context_precommit.current, res['port_pair_group']) self.assertEqual( self.plugin_context_postcommit.current, res['port_pair_group']) self.assertEqual( self.plugin_context_precommit.original, pc['port_pair_group']) self.assertEqual( self.plugin_context_postcommit.original, pc['port_pair_group']) def _test_update_port_pair_group_driver_manager_exception(self, updated): with self.port_pair_group(port_pair_group={ 'name': 'test1' }) as pc: self.assertIn('port_pair_group', pc) original_port_pair_group = pc['port_pair_group'] req = self.new_update_request( 'port_pair_groups', {'port_pair_group': {'name': 'test2'}}, pc['port_pair_group']['id'] ) updated_port_pair_group = copy.copy(original_port_pair_group) if updated: updated_port_pair_group['name'] = 'test2' res = req.get_response(self.ext_api) self.assertEqual(500, res.status_int) res = self._list('port_pair_groups') self.assertIn('port_pair_groups', res) self.assertItemsEqual( res['port_pair_groups'], [updated_port_pair_group]) def test_update_port_pair_group_precommit_driver_manager_exception(self): self.fake_driver_manager.update_port_pair_group_precommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='update_port_pair_group_precommit' ) ) 
self._test_update_port_pair_group_driver_manager_exception(False) def test_update_port_pair_group_postcommit_driver_manager_exception(self): self.fake_driver_manager.update_port_pair_group_postcommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='update_port_pair_group_postcommit' ) ) self._test_update_port_pair_group_driver_manager_exception(True) def test_delete_port_pair_group_manager_called(self): self.fake_driver_manager.delete_port_pair_group = mock.Mock( side_effect=self._record_context) self.fake_driver_manager.delete_port_pair_group_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.delete_port_pair_group_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port_pair_group(port_pair_group={ 'name': 'test1' }, do_delete=False) as pc: req = self.new_delete_request( 'port_pair_groups', pc['port_pair_group']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(204, res.status_int) driver_manager = self.fake_driver_manager driver_manager.delete_port_pair_group.assert_called_once_with( mock.ANY ) (driver_manager.delete_port_pair_group_precommit .assert_called_once_with(mock.ANY)) (driver_manager.delete_port_pair_group_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context, sfc_ctx.PortPairGroupContext ) self.assertIsInstance( self.plugin_context_precommit, sfc_ctx.PortPairGroupContext ) self.assertIsInstance( self.plugin_context_postcommit, sfc_ctx.PortPairGroupContext ) self.assertIn('port_pair_group', pc) self.assertEqual( self.plugin_context.current, pc['port_pair_group']) self.assertEqual( self.plugin_context_precommit.current, pc['port_pair_group']) self.assertEqual( self.plugin_context_postcommit.current, pc['port_pair_group']) def _test_delete_port_pair_group_driver_manager_exception(self): with self.port_pair_group(port_pair_group={ 'name': 'test1' }, do_delete=False) as pc: req = self.new_delete_request( 'port_pair_groups', 
pc['port_pair_group']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(500, res.status_int) self._test_list_resources('port_pair_group', [pc]) def test_delete_port_pair_group_driver_manager_exception(self): self.fake_driver_manager.delete_port_pair_group = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='delete_port_pair_group' ) ) self._test_delete_port_pair_group_driver_manager_exception() def test_delete_port_pair_group_precommit_driver_manager_exception(self): self.fake_driver_manager.delete_port_pair_group_precommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='delete_port_pair_group_precommit' ) ) self._test_delete_port_pair_group_driver_manager_exception() def test_create_port_pair_driver_manager_called(self): self.fake_driver_manager.create_port_pair_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.create_port_pair_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: with self.port_pair(port_pair={ 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }) as pc: driver_manager = self.fake_driver_manager (driver_manager.create_port_pair_precommit .assert_called_once_with(mock.ANY)) (driver_manager.create_port_pair_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context_precommit, sfc_ctx.PortPairContext ) self.assertIsInstance( self.plugin_context_postcommit, sfc_ctx.PortPairContext ) self.assertIn('port_pair', pc) self.assertEqual(self.plugin_context_precommit.current, pc['port_pair']) self.assertEqual(self.plugin_context_postcommit.current, pc['port_pair']) def test_create_port_pair_precommit_driver_manager_exception(self): self.fake_driver_manager.create_port_pair_precommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='create_port_pair_precommit' ) ) with self.port( name='port1', 
device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: self._create_port_pair( self.fmt, { 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }, expected_res_status=500) self._test_list_resources('port_pair', []) driver_manager = self.fake_driver_manager driver_manager.create_port_pair_postcommit.assert_not_called() driver_manager.delete_port_pair.assert_not_called() def test_create_port_pair_postcommit_driver_manager_exception(self): self.fake_driver_manager.create_port_pair_postcommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='create_port_pair_postcommit' ) ) with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: self._create_port_pair( self.fmt, { 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }, expected_res_status=500) self._test_list_resources('port_pair', []) driver_manager = self.fake_driver_manager driver_manager.create_port_pair_precommit.assert_called_once_with( mock.ANY ) driver_manager.delete_port_pair.assert_called_once_with( mock.ANY ) driver_manager.delete_port_pair_precommit.assert_called_once_with( mock.ANY ) driver_manager.delete_port_pair_postcommit.assert_called_once_with( mock.ANY ) def test_update_port_pair_driver_manager_called(self): self.fake_driver_manager.update_port_pair_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.update_port_pair_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: with self.port_pair(port_pair={ 'name': 'test1', 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }) as pc: req = self.new_update_request( 'port_pairs', {'port_pair': {'name': 'test2'}}, pc['port_pair']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) driver_manager = 
self.fake_driver_manager (driver_manager.update_port_pair_precommit .assert_called_once_with(mock.ANY)) (driver_manager.update_port_pair_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context_precommit, sfc_ctx.PortPairContext ) self.assertIsInstance( self.plugin_context_postcommit, sfc_ctx.PortPairContext ) self.assertIn('port_pair', pc) self.assertIn('port_pair', res) self.assertEqual( self.plugin_context_precommit.current, res['port_pair']) self.assertEqual( self.plugin_context_postcommit.current, res['port_pair']) self.assertEqual( self.plugin_context_precommit.original, pc['port_pair']) self.assertEqual( self.plugin_context_postcommit.original, pc['port_pair']) def _test_update_port_pair_driver_manager_exception(self, updated): with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: with self.port_pair(port_pair={ 'name': 'test1', 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }) as pc: self.assertIn('port_pair', pc) original_port_pair = pc['port_pair'] req = self.new_update_request( 'port_pairs', {'port_pair': {'name': 'test2'}}, pc['port_pair']['id'] ) updated_port_pair = copy.copy(original_port_pair) if updated: updated_port_pair['name'] = 'test2' res = req.get_response(self.ext_api) self.assertEqual(500, res.status_int) res = self._list('port_pairs') self.assertIn('port_pairs', res) self.assertItemsEqual(res['port_pairs'], [updated_port_pair]) def test_update_port_pair_precommit_driver_manager_exception(self): self.fake_driver_manager.update_port_pair_precommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='update_port_pair_precommit' ) ) self._test_update_port_pair_driver_manager_exception(False) def test_update_port_pair_postcommit_driver_manager_exception(self): self.fake_driver_manager.update_port_pair_postcommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='update_port_pair_postcommit' ) ) 
self._test_update_port_pair_driver_manager_exception(True) def test_delete_port_pair_manager_called(self): self.fake_driver_manager.delete_port_pair = mock.Mock( side_effect=self._record_context) self.fake_driver_manager.delete_port_pair_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.delete_port_pair_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: with self.port_pair(port_pair={ 'name': 'test1', 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }, do_delete=False) as pc: req = self.new_delete_request( 'port_pairs', pc['port_pair']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(204, res.status_int) fake_driver_manager = self.fake_driver_manager fake_driver_manager.delete_port_pair.assert_called_once_with( mock.ANY ) (fake_driver_manager.delete_port_pair_precommit .assert_called_once_with(mock.ANY)) (fake_driver_manager.delete_port_pair_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context, sfc_ctx.PortPairContext ) self.assertIsInstance( self.plugin_context_precommit, sfc_ctx.PortPairContext ) self.assertIsInstance( self.plugin_context_postcommit, sfc_ctx.PortPairContext ) self.assertIn('port_pair', pc) self.assertEqual(self.plugin_context.current, pc['port_pair']) def _test_delete_port_pair_driver_manager_exception(self): with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: with self.port_pair(port_pair={ 'name': 'test1', 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }, do_delete=False) as pc: req = self.new_delete_request( 'port_pairs', pc['port_pair']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(500, res.status_int) self._test_list_resources('port_pair', [pc]) def 
test_delete_port_pair_driver_manager_exception(self): self.fake_driver_manager.delete_port_pair = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='delete_port_pair' ) ) self._test_delete_port_pair_driver_manager_exception() def test_delete_port_pair_precommit_driver_manager_exception(self): self.fake_driver_manager.delete_port_pair_precommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='delete_port_pair_precommit' ) ) self._test_delete_port_pair_driver_manager_exception() def test_create_service_graph_driver_manager_called(self): self.fake_driver_manager.create_service_graph_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.create_service_graph_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain( port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]} ) as pc1, self.port_chain( port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]} ) as pc2: with self.service_graph( service_graph={ 'name': 'test1', 'port_chains': { pc1['port_chain']['id']: [pc2['port_chain']['id']] } } ) as graph: driver_manager = self.fake_driver_manager (driver_manager.create_service_graph_precommit .assert_called_once_with(mock.ANY)) (driver_manager.create_service_graph_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context_precommit, sfc_ctx.ServiceGraphContext ) self.assertIsInstance( self.plugin_context_postcommit, sfc_ctx.ServiceGraphContext ) self.assertIn('service_graph', graph) self.assertEqual( self.plugin_context_precommit.current, graph['service_graph']) self.assertEqual( self.plugin_context_postcommit.current, graph['service_graph']) def test_create_service_graph_precommit_driver_manager_exception(self): self.fake_driver_manager.create_service_graph_precommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( 
method='create_service_graph_precommit' ) ) with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain( port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]} ) as pc1, self.port_chain( port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]} ) as pc2: self._create_service_graph(self.fmt, { 'port_chains': { pc1['port_chain']['id']: [pc2['port_chain']['id']] } }, expected_res_status=500) self._test_list_resources('service_graph', []) (self.fake_driver_manager.create_service_graph_postcommit .assert_not_called()) self.fake_driver_manager.delete_service_graph.assert_not_called() def test_create_service_graph_postcommit_driver_manager_exception(self): self.fake_driver_manager.create_service_graph_postcommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='create_service_graph_postcommit' ) ) with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain( port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]} ) as pc1, self.port_chain( port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]} ) as pc2: self._create_service_graph(self.fmt, { 'port_chains': { pc1['port_chain']['id']: [pc2['port_chain']['id']] } }, expected_res_status=500) self._test_list_resources('service_graph', []) (self.fake_driver_manager.create_service_graph_precommit .assert_called_once_with(mock.ANY)) self.fake_driver_manager.delete_service_graph_postcommit.\ assert_called_once_with(mock.ANY) def test_update_service_graph_driver_manager_called(self): self.fake_driver_manager.update_service_graph_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.update_service_graph_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain( 
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]} ) as pc1, self.port_chain( port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]} ) as pc2: with self.service_graph(service_graph={ 'name': 'test1', 'port_chains': { pc1['port_chain']['id']: [pc2['port_chain']['id']] } }) as graph: req = self.new_update_request( 'service_graphs', {'service_graph': {'name': 'test2'}}, graph['service_graph']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) driver_manager = self.fake_driver_manager (driver_manager.update_service_graph_precommit .assert_called_once_with(mock.ANY)) (driver_manager.update_service_graph_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( self.plugin_context_precommit, sfc_ctx.ServiceGraphContext ) self.assertIsInstance( self.plugin_context_postcommit, sfc_ctx.ServiceGraphContext ) self.assertIn('service_graph', graph) self.assertIn('service_graph', res) self.assertEqual( self.plugin_context_precommit.current, res['service_graph']) self.assertEqual( self.plugin_context_postcommit.current, res['service_graph']) self.assertEqual( self.plugin_context_precommit.original, graph['service_graph']) self.assertEqual( self.plugin_context_postcommit.original, graph['service_graph']) def _test_update_service_graph_driver_manager_exception(self, updated): with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain( port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]} ) as pc1, self.port_chain( port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]} ) as pc2: with self.service_graph(service_graph={ 'name': 'test1', 'port_chains': { pc1['port_chain']['id']: [pc2['port_chain']['id']] } }) as graph: self.assertIn('service_graph', graph) original_service_graph = graph['service_graph'] req = self.new_update_request( 'service_graphs', {'service_graph': {'name': 'test2'}}, graph['service_graph']['id'] ) updated_service_graph = 
copy.copy(original_service_graph) if updated: updated_service_graph['name'] = 'test2' res = req.get_response(self.ext_api) self.assertEqual(500, res.status_int) res = self._list('service_graphs') self.assertIn('service_graphs', res) self.assertItemsEqual( res['service_graphs'], [updated_service_graph]) def test_update_service_graph_precommit_driver_manager_exception(self): self.fake_driver_manager.update_service_graph_precommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='update_service_graph_precommit' ) ) self._test_update_service_graph_driver_manager_exception(False) def test_update_service_graph_postcommit_driver_manager_exception(self): self.fake_driver_manager.update_service_graph_postcommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='update_service_graph_postcommit' ) ) self._test_update_service_graph_driver_manager_exception(True) def test_delete_service_graph_manager_called(self): self.fake_driver_manager.delete_service_graph_precommit = mock.Mock( side_effect=self._record_context_precommit) self.fake_driver_manager.delete_service_graph_postcommit = mock.Mock( side_effect=self._record_context_postcommit) with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain( port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]} ) as pc1, self.port_chain( port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]} ) as pc2: with self.service_graph(service_graph={ 'name': 'test1', 'port_chains': { pc1['port_chain']['id']: [pc2['port_chain']['id']] } }, do_delete=False) as graph: req = self.new_delete_request( 'service_graphs', graph['service_graph']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(204, res.status_int) driver_manager = self.fake_driver_manager (driver_manager.delete_service_graph_precommit .assert_called_once_with(mock.ANY)) (driver_manager.delete_service_graph_postcommit .assert_called_once_with(mock.ANY)) self.assertIsInstance( 
self.plugin_context_precommit, sfc_ctx.ServiceGraphContext ) self.assertIsInstance( self.plugin_context_postcommit, sfc_ctx.ServiceGraphContext ) self.assertIn('service_graph', graph) self.assertEqual(self.plugin_context_precommit.current, graph['service_graph']) self.assertEqual(self.plugin_context_postcommit.current, graph['service_graph']) def _test_delete_service_graph_driver_manager_exception(self): with self.port_pair_group( port_pair_group={}, do_delete=False ) as pg1, self.port_pair_group( port_pair_group={}, do_delete=False ) as pg2: with self.port_chain( port_chain={ 'port_pair_groups': [ pg1['port_pair_group']['id'] ] }, do_delete=False ) as pc1, self.port_chain( port_chain={ 'port_pair_groups': [ pg2['port_pair_group']['id'] ] }, do_delete=False ) as pc2: with self.service_graph( service_graph={ 'name': 'test1', 'port_chains': { pc1['port_chain']['id']: [ pc2['port_chain']['id'] ] } }, do_delete=False ) as graph: req = self.new_delete_request( 'service_graphs', graph['service_graph']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(500, res.status_int) self._test_list_resources('service_graph', [graph]) def test_delete_service_graph_driver_precommit_manager_exception(self): self.fake_driver_manager.delete_service_graph_precommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='delete_service_graph_precommit' ) ) self._test_delete_service_graph_driver_manager_exception() def test_delete_service_graph_driver_postcommit_manager_exception(self): self.fake_driver_manager.delete_service_graph_precommit = mock.Mock( side_effect=sfc_exc.SfcDriverError( method='delete_service_graph_postcommit' ) ) self._test_delete_service_graph_driver_manager_exception() networking-sfc-10.0.0/networking_sfc/tests/unit/extensions/0000775000175000017500000000000013656750461024131 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/extensions/test_flowclassifier.py0000664000175000017500000010360413656750333030560 0ustar 
zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock from neutron.api.v2 import resource as api_res_log from neutron import manager from neutron.notifiers import nova as nova_log from neutron.tests.unit.api.v2 import test_base as test_api_v2 from neutron.tests.unit.extensions import base as test_api_v2_extension from neutron_lib import constants as const from oslo_config import cfg from oslo_utils import uuidutils from webob import exc import webtest from networking_sfc.extensions import flowclassifier as fc_ext _uuid = uuidutils.generate_uuid _get_path = test_api_v2._get_path FLOW_CLASSIFIER_PATH = (fc_ext.FLOW_CLASSIFIER_PREFIX[1:] + '/' + fc_ext.FLOW_CLASSIFIER_EXT + 's') class FlowClassifierExtensionTestCase( test_api_v2_extension.ExtensionTestCase ): fmt = 'json' def setUp(self): self._mock_unnecessary_logging() super(FlowClassifierExtensionTestCase, self).setUp() self.setup_extension( 'networking_sfc.extensions.flowclassifier.' 
'FlowClassifierPluginBase', fc_ext.FLOW_CLASSIFIER_EXT, fc_ext.Flowclassifier, fc_ext.FLOW_CLASSIFIER_PREFIX[1:], plural_mappings={} ) def _mock_unnecessary_logging(self): mock_log_cfg_p = mock.patch.object(cfg, 'LOG') self.mock_log_cfg = mock_log_cfg_p.start() mock_log_manager_p = mock.patch.object(manager, 'LOG') self.mock_log_manager = mock_log_manager_p.start() mock_log_nova_p = mock.patch.object(nova_log, 'LOG') self.mock_log_nova = mock_log_nova_p.start() mock_log_api_res_log_p = mock.patch.object(api_res_log, 'LOG') self.mock_log_api_res_log = mock_log_api_res_log_p.start() def _get_expected_flow_classifier(self, data): source_port_range_min = data['flow_classifier'].get( 'source_port_range_min') if source_port_range_min is not None: source_port_range_min = int(source_port_range_min) source_port_range_max = data['flow_classifier'].get( 'source_port_range_max') if source_port_range_max is not None: source_port_range_max = int(source_port_range_max) destination_port_range_min = data['flow_classifier'].get( 'destination_port_range_min') if destination_port_range_min is not None: destination_port_range_min = int(destination_port_range_min) destination_port_range_max = data['flow_classifier'].get( 'destination_port_range_max') if destination_port_range_max is not None: destination_port_range_max = int(destination_port_range_max) return {'flow_classifier': { 'name': data['flow_classifier'].get('name') or '', 'description': data['flow_classifier'].get('description') or '', 'tenant_id': data['flow_classifier']['tenant_id'], 'project_id': data['flow_classifier']['project_id'], 'source_port_range_min': source_port_range_min, 'source_port_range_max': source_port_range_max, 'destination_port_range_min': destination_port_range_min, 'destination_port_range_max': destination_port_range_max, 'l7_parameters': data['flow_classifier'].get( 'l7_parameters') or {}, 'destination_ip_prefix': data['flow_classifier'].get( 'destination_ip_prefix'), 'source_ip_prefix': 
data['flow_classifier'].get( 'source_ip_prefix'), 'logical_source_port': data['flow_classifier'].get( 'logical_source_port'), 'logical_destination_port': data['flow_classifier'].get( 'logical_destination_port'), 'ethertype': data['flow_classifier'].get( 'ethertype') or 'IPv4', 'protocol': data['flow_classifier'].get( 'protocol') }} def test_create_flow_classifier(self): flowclassifier_id = _uuid() tenant_id = _uuid() data = {'flow_classifier': { 'logical_source_port': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id, }} expected_data = self._get_expected_flow_classifier(data) return_value = copy.copy(expected_data['flow_classifier']) return_value.update({'id': flowclassifier_id}) instance = self.plugin.return_value instance.create_flow_classifier.return_value = return_value res = self.api.post( _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_flow_classifier.assert_called_with( mock.ANY, flow_classifier=expected_data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifier', res) self.assertEqual(return_value, res['flow_classifier']) def test_create_flow_classifier_source_port_range(self): for source_port_range_min in [None, 100, '100']: for source_port_range_max in [None, 200, '200']: flowclassifier_id = _uuid() tenant_id = _uuid() data = {'flow_classifier': { 'source_port_range_min': source_port_range_min, 'source_port_range_max': source_port_range_max, 'logical_source_port': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id, }} expected_data = self._get_expected_flow_classifier(data) return_value = copy.copy(expected_data['flow_classifier']) return_value.update({'id': flowclassifier_id}) instance = self.plugin.return_value instance.create_flow_classifier.return_value = return_value res = self.api.post( _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) 
instance.create_flow_classifier.assert_called_with( mock.ANY, flow_classifier=expected_data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifier', res) self.assertEqual(return_value, res['flow_classifier']) def test_create_flow_classifier_destination_port_range(self): for destination_port_range_min in [None, 100, '100']: for destination_port_range_max in [None, 200, '200']: flowclassifier_id = _uuid() tenant_id = _uuid() data = {'flow_classifier': { 'destination_port_range_min': destination_port_range_min, 'destination_port_range_max': destination_port_range_max, 'logical_source_port': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id, }} expected_data = self._get_expected_flow_classifier(data) return_value = copy.copy(expected_data['flow_classifier']) return_value.update({'id': flowclassifier_id}) instance = self.plugin.return_value instance.create_flow_classifier.return_value = return_value res = self.api.post( _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_flow_classifier.assert_called_with( mock.ANY, flow_classifier=expected_data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifier', res) self.assertEqual(return_value, res['flow_classifier']) def test_create_flow_classifier_source_ip_prefix(self): for logical_source_ip_prefix in [ None, '10.0.0.0/8' ]: flowclassifier_id = _uuid() tenant_id = _uuid() data = {'flow_classifier': { 'source_ip_prefix': logical_source_ip_prefix, 'logical_source_port': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id, }} expected_data = self._get_expected_flow_classifier(data) return_value = copy.copy(expected_data['flow_classifier']) return_value.update({'id': flowclassifier_id}) instance = self.plugin.return_value instance.create_flow_classifier.return_value = return_value res = self.api.post( 
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_flow_classifier.assert_called_with( mock.ANY, flow_classifier=expected_data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifier', res) self.assertEqual(return_value, res['flow_classifier']) def test_create_flow_classifier_destination_ip_prefix(self): for logical_destination_ip_prefix in [ None, '10.0.0.0/8' ]: flowclassifier_id = _uuid() tenant_id = _uuid() data = {'flow_classifier': { 'destination_ip_prefix': logical_destination_ip_prefix, 'logical_source_port': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id, }} expected_data = self._get_expected_flow_classifier(data) return_value = copy.copy(expected_data['flow_classifier']) return_value.update({'id': flowclassifier_id}) instance = self.plugin.return_value instance.create_flow_classifier.return_value = return_value res = self.api.post( _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_flow_classifier.assert_called_with( mock.ANY, flow_classifier=expected_data) self.assertEqual(res.status_int, exc.HTTPCreated.code) res = self.deserialize(res) self.assertIn('flow_classifier', res) self.assertEqual(return_value, res['flow_classifier']) def test_create_flow_classifier_logical_source_port(self): for logical_source_port in [ None, _uuid() ]: flowclassifier_id = _uuid() tenant_id = _uuid() data = {'flow_classifier': { 'logical_source_port': logical_source_port, 'tenant_id': tenant_id, 'project_id': tenant_id, }} expected_data = self._get_expected_flow_classifier(data) return_value = copy.copy(expected_data['flow_classifier']) return_value.update({'id': flowclassifier_id}) instance = self.plugin.return_value instance.create_flow_classifier.return_value = return_value res = self.api.post( _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), 
self.serialize(data), content_type='application/%s' % self.fmt) instance.create_flow_classifier.assert_called_with( mock.ANY, flow_classifier=expected_data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifier', res) self.assertEqual(return_value, res['flow_classifier']) def test_create_flow_classifier_logical_destination_port(self): for logical_destination_port in [ None, _uuid() ]: flowclassifier_id = _uuid() tenant_id = _uuid() data = {'flow_classifier': { 'logical_destination_port': logical_destination_port, 'tenant_id': tenant_id, 'project_id': tenant_id, }} expected_data = self._get_expected_flow_classifier(data) return_value = copy.copy(expected_data['flow_classifier']) return_value.update({'id': flowclassifier_id}) instance = self.plugin.return_value instance.create_flow_classifier.return_value = return_value res = self.api.post( _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_flow_classifier.assert_called_with( mock.ANY, flow_classifier=expected_data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifier', res) self.assertEqual(return_value, res['flow_classifier']) def test_create_flow_classifier_l7_parameters(self): for l7_parameters in [None, {}]: flowclassifier_id = _uuid() tenant_id = _uuid() data = {'flow_classifier': { 'logical_source_port': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id, 'l7_parameters': l7_parameters }} expected_data = self._get_expected_flow_classifier(data) return_value = copy.copy(expected_data['flow_classifier']) return_value.update({'id': flowclassifier_id}) instance = self.plugin.return_value instance.create_flow_classifier.return_value = return_value res = self.api.post( _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) 
instance.create_flow_classifier.assert_called_with( mock.ANY, flow_classifier=expected_data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifier', res) self.assertEqual(return_value, res['flow_classifier']) def test_create_flow_classifier_ethertype(self): for ethertype in [None, 'IPv4', 'IPv6']: flowclassifier_id = _uuid() tenant_id = _uuid() data = {'flow_classifier': { 'logical_source_port': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id, 'ethertype': ethertype }} expected_data = self._get_expected_flow_classifier(data) return_value = copy.copy(expected_data['flow_classifier']) return_value.update({'id': flowclassifier_id}) instance = self.plugin.return_value instance.create_flow_classifier.return_value = return_value res = self.api.post( _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_flow_classifier.assert_called_with( mock.ANY, flow_classifier=expected_data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifier', res) self.assertEqual(return_value, res['flow_classifier']) def test_create_flow_classifier_protocol(self): for protocol in [ None, const.PROTO_NAME_TCP, const.PROTO_NAME_UDP, const.PROTO_NAME_ICMP ]: flowclassifier_id = _uuid() tenant_id = _uuid() data = {'flow_classifier': { 'logical_source_port': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id, 'protocol': protocol }} expected_data = self._get_expected_flow_classifier(data) return_value = copy.copy(expected_data['flow_classifier']) return_value.update({'id': flowclassifier_id}) instance = self.plugin.return_value instance.create_flow_classifier.return_value = return_value res = self.api.post( _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_flow_classifier.assert_called_with( mock.ANY, 
flow_classifier=expected_data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifier', res) self.assertEqual(return_value, res['flow_classifier']) def test_create_flow_classifier_all_fields(self): flowclassifier_id = _uuid() tenant_id = _uuid() data = {'flow_classifier': { 'name': 'test1', 'description': 'desc', 'tenant_id': tenant_id, 'project_id': tenant_id, 'source_port_range_min': 100, 'source_port_range_max': 200, 'destination_port_range_min': 100, 'destination_port_range_max': 200, 'l7_parameters': {}, 'destination_ip_prefix': '10.0.0.0/8', 'source_ip_prefix': '10.0.0.0/8', 'logical_source_port': _uuid(), 'logical_destination_port': _uuid(), 'ethertype': None, 'protocol': None }} expected_data = self._get_expected_flow_classifier(data) return_value = copy.copy(expected_data['flow_classifier']) return_value.update({'id': flowclassifier_id}) instance = self.plugin.return_value instance.create_flow_classifier.return_value = return_value res = self.api.post( _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_flow_classifier.assert_called_with( mock.ANY, flow_classifier=expected_data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifier', res) self.assertEqual(return_value, res['flow_classifier']) def test_create_flow_classifier_invalid_l7_parameters(self): tenant_id = _uuid() data = {'flow_classifier': { 'logical_source_port': _uuid(), 'l7_parameters': {'abc': 'def'}, 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.post, _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_create_flow_classifier_invalid_protocol(self): tenant_id = _uuid() data = {'flow_classifier': { 'logical_source_port': _uuid(), 'protocol': 'unknown', 'tenant_id': tenant_id, 
'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.post, _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_create_flow_classifier_invalid_ethertype(self): tenant_id = _uuid() data = {'flow_classifier': { 'logical_source_port': _uuid(), 'ethertype': 'unknown', 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.post, _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_create_flow_classifier_port_small(self): tenant_id = _uuid() data = {'flow_classifier': { 'logical_source_port': _uuid(), 'source_port_range_min': -1, 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.post, _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_create_flow_classifier_port_large(self): tenant_id = _uuid() data = {'flow_classifier': { 'logical_source_port': _uuid(), 'source_port_range_min': 65536, 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.post, _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_create_flow_classifier_ip_prefix_no_cidr(self): tenant_id = _uuid() data = {'flow_classifier': { 'source_ip_prefix': '10.0.0.0', 'logical_source_port': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.post, _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_create_flow_classifier_ip_prefix_invalid_cidr(self): tenant_id = _uuid() data = {'flow_classifier': { 'source_ip_prefix': '10.0.0.0/33', 'logical_source_port': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, 
self.api.post, _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_create_flow_classifier_port_id_nouuid(self): tenant_id = _uuid() data = {'flow_classifier': { 'logical_source_port': 'unknown', 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.post, _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_flow_classifier_list(self): tenant_id = _uuid() flowclassifier_id = _uuid() return_value = [{ 'tenant_id': tenant_id, 'project_id': tenant_id, 'id': flowclassifier_id }] instance = self.plugin.return_value instance.get_flow_classifiers.return_value = return_value res = self.api.get( _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt)) instance.get_flow_classifiers.assert_called_with( mock.ANY, fields=mock.ANY, filters=mock.ANY ) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifiers', res) self.assertEqual(return_value, res['flow_classifiers']) def test_flow_classifier_list_all_fields(self): tenant_id = _uuid() flowclassifier_id = _uuid() return_value = [{ 'name': 'abc', 'description': 'abc', 'ethertype': 'IPv4', 'protocol': const.PROTO_NAME_TCP, 'source_ip_prefix': '10.0.0.0/8', 'destination_ip_prefix': '10.0.0.0/8', 'source_port_range_min': 100, 'source_port_range_max': 200, 'destination_port_range_min': 100, 'destination_port_range_max': 200, 'logical_source_port': _uuid(), 'logical_destination_port': _uuid(), 'l7_parameters': {}, 'tenant_id': tenant_id, 'project_id': tenant_id, 'id': flowclassifier_id }] instance = self.plugin.return_value instance.get_flow_classifiers.return_value = return_value res = self.api.get( _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt)) instance.get_flow_classifiers.assert_called_with( mock.ANY, fields=mock.ANY, filters=mock.ANY ) self.assertEqual(exc.HTTPOk.code, res.status_int) res = 
self.deserialize(res) self.assertIn('flow_classifiers', res) self.assertEqual(return_value, res['flow_classifiers']) def test_flow_classifier_list_unknown_fields(self): tenant_id = _uuid() flowclassifier_id = _uuid() return_value = [{ 'logical_source_port': _uuid(), 'new_key': 'value', 'tenant_id': tenant_id, 'project_id': tenant_id, 'id': flowclassifier_id }] expected_return = copy.copy(return_value) for item in expected_return: del item['new_key'] instance = self.plugin.return_value instance.get_flow_classifiers.return_value = return_value res = self.api.get( _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt)) instance.get_flow_classifiers.assert_called_with( mock.ANY, fields=mock.ANY, filters=mock.ANY ) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifiers', res) self.assertEqual(expected_return, res['flow_classifiers']) def test_flow_classifier_get(self): tenant_id = _uuid() flowclassifier_id = _uuid() return_value = { 'logical_source_port': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id, 'id': flowclassifier_id } instance = self.plugin.return_value instance.get_flow_classifier.return_value = return_value res = self.api.get( _get_path( FLOW_CLASSIFIER_PATH, id=flowclassifier_id, fmt=self.fmt ) ) instance.get_flow_classifier.assert_called_with( mock.ANY, flowclassifier_id, fields=mock.ANY ) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifier', res) self.assertEqual(return_value, res['flow_classifier']) def test_flow_classifier_update(self): tenant_id = _uuid() flowclassifier_id = _uuid() update_data = {'flow_classifier': { 'name': 'new_name', 'description': 'new_desc', }} return_value = { 'tenant_id': tenant_id, 'project_id': tenant_id, 'id': flowclassifier_id } instance = self.plugin.return_value instance.update_flow_classifier.return_value = return_value res = self.api.put( _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id, fmt=self.fmt), 
self.serialize(update_data)) instance.update_flow_classifier.assert_called_with( mock.ANY, flowclassifier_id, flow_classifier=update_data) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('flow_classifier', res) self.assertEqual(return_value, res['flow_classifier']) def test_flow_classifier_update_source_port_range_min(self): tenant_id = _uuid() flowclassifier_id = _uuid() data = {'flow_classifier': { 'source_port_range_min': 100, 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_flow_classifier_update_source_port_range_max(self): tenant_id = _uuid() flowclassifier_id = _uuid() data = {'flow_classifier': { 'source_port_range_max': 100, 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_flow_classifier_update_destination_port_range_min(self): tenant_id = _uuid() flowclassifier_id = _uuid() data = {'flow_classifier': { 'destination_port_range_min': 100, 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_flow_classifier_update_destination_port_range_max(self): tenant_id = _uuid() flowclassifier_id = _uuid() data = {'flow_classifier': { 'destination_port_range_max': 100, 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def 
test_flow_classifier_update_source_ip_prefix(self): tenant_id = _uuid() flowclassifier_id = _uuid() data = {'flow_classifier': { 'source_ip_prefix': '10.0.0.0/8', 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_flow_classifier_update_destination_ip_prefix(self): tenant_id = _uuid() flowclassifier_id = _uuid() data = {'flow_classifier': { 'destination_ip_prefix': '10.0.0.0/8', 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_flow_classifier_update_logical_source_port(self): tenant_id = _uuid() flowclassifier_id = _uuid() data = {'flow_classifier': { 'logical_source_port': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_flow_classifier_update_logical_destination_port(self): tenant_id = _uuid() flowclassifier_id = _uuid() data = {'flow_classifier': { 'logical_destination_port': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_flow_classifier_update_ethertype(self): tenant_id = _uuid() flowclassifier_id = _uuid() data = {'flow_classifier': { 'ethertype': None, 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id, fmt=self.fmt), self.serialize(data), 
content_type='application/%s' % self.fmt) def test_flow_classifier_update_protocol(self): tenant_id = _uuid() flowclassifier_id = _uuid() data = {'flow_classifier': { 'protococol': None, 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_flow_classifier_update_l7_parameters(self): tenant_id = _uuid() flowclassifier_id = _uuid() data = {'flow_classifier': { 'l7_parameters': {}, 'tenant_id': tenant_id, 'project_id': tenant_id, }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_flow_classifier_delete(self): self._test_entity_delete('flow_classifier') networking-sfc-10.0.0/networking_sfc/tests/unit/extensions/__init__.py0000664000175000017500000000000013656750333026226 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/unit/extensions/test_servicegraph.py0000664000175000017500000001633213656750333030227 0ustar zuulzuul00000000000000# Copyright 2017 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy
from unittest import mock

from neutron.api.v2 import resource as api_res_log
from neutron import manager
from neutron.notifiers import nova as nova_log
from neutron.tests.unit.api.v2 import test_base as test_api_v2
from neutron.tests.unit.extensions import base as test_api_v2_extension
from oslo_config import cfg
from oslo_utils import uuidutils
from webob import exc
import webtest

from networking_sfc.extensions import servicegraph as sg_ext
from networking_sfc.extensions import sfc as sfc_ext

_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path

# REST collection path for service graphs (the prefix minus its leading '/').
SERVICE_GRAPH_PATH = (sg_ext.SG_PREFIX[1:] + '/service_graphs')


class ServiceGraphExtensionTestCase(test_api_v2_extension.ExtensionTestCase):
    """API-layer tests for the service-graph extension.

    The plugin is a mock (``self.plugin``); every test drives the WSGI API
    through webtest and then verifies both the HTTP status code and the
    exact call the API layer made into the plugin.
    """

    fmt = 'json'  # serialization format for every request/response body

    def setUp(self):
        self._mock_unnecessary_logging()
        super(ServiceGraphExtensionTestCase, self).setUp()
        self.setup_extension(
            'networking_sfc.extensions.servicegraph.ServiceGraphPluginBase',
            sfc_ext.SFC_EXT,
            sg_ext.Servicegraph,
            sg_ext.SG_PREFIX[1:],
            plural_mappings={}
        )

    def _mock_unnecessary_logging(self):
        # Patch out loggers that would otherwise spam the test output.
        mock_log_cfg_p = mock.patch.object(cfg, 'LOG')
        self.mock_log_cfg = mock_log_cfg_p.start()
        mock_log_manager_p = mock.patch.object(manager, 'LOG')
        self.mock_log_manager = mock_log_manager_p.start()
        mock_log_nova_p = mock.patch.object(nova_log, 'LOG')
        self.mock_log_nova = mock_log_nova_p.start()
        mock_log_api_res_log_p = mock.patch.object(api_res_log, 'LOG')
        self.mock_log_api_res_log = mock_log_api_res_log_p.start()

    @staticmethod
    def _get_expected_service_graph(data):
        """Return the request body after API-layer attribute defaulting.

        Mirrors what the extension fills in for omitted attributes: empty
        name/description and tenant_id mirrored from project_id.
        """
        service_graph = data['service_graph']
        ret = {'service_graph': {
            'description': service_graph.get('description') or '',
            'name': service_graph.get('name') or '',
            'port_chains': service_graph['port_chains'],
            'tenant_id': service_graph['project_id'],
            'project_id': service_graph['project_id']
        }}
        return ret

    def _test_create_service_graph(self, **kwargs):
        # POST a graph built from defaults plus **kwargs; check the plugin
        # receives the defaulted body and the response echoes its return.
        tenant_id = _uuid()
        graph_data = {
            'port_chains': {_uuid(): [_uuid()]},
            'project_id': tenant_id
        }
        graph_data.update(kwargs)
        data = {'service_graph': graph_data}
        expected_data = self._get_expected_service_graph(data)
        return_value = copy.copy(expected_data['service_graph'])
        return_value.update({'id': _uuid()})
        instance = self.plugin.return_value
        instance.create_service_graph.return_value = return_value
        res = self.api.post(_get_path(SERVICE_GRAPH_PATH, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_service_graph.assert_called_with(
            mock.ANY, service_graph=expected_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('service_graph', res)
        self.assertEqual(return_value, res['service_graph'])

    def test_create_service_graph(self):
        self._test_create_service_graph()

    def test_create_service_graph_complex_dict(self):
        # Two source chains, one mapping to two destination chains.
        port_chains = {_uuid(): [_uuid()],
                       _uuid(): [_uuid(), _uuid()]}
        self._test_create_service_graph(description='desc', name='graph1',
                                        port_chains=port_chains)

    def test_list_service_graph(self):
        service_graph_id = _uuid()
        tenant_id = _uuid()
        return_value = [{
            'project_id': tenant_id,
            'id': service_graph_id
        }]
        instance = self.plugin.return_value
        instance.get_service_graphs.return_value = return_value
        res = self.api.get(_get_path(SERVICE_GRAPH_PATH, fmt=self.fmt))
        instance.get_service_graphs.assert_called_with(
            mock.ANY, fields=mock.ANY, filters=mock.ANY
        )
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('service_graphs', res)
        self.assertEqual(return_value, res['service_graphs'])

    def test_get_service_graph(self):
        service_graph_id = _uuid()
        tenant_id = _uuid()
        return_value = {
            'project_id': tenant_id,
            'id': service_graph_id
        }
        instance = self.plugin.return_value
        instance.get_service_graph.return_value = return_value
        res = self.api.get(_get_path(SERVICE_GRAPH_PATH,
                                     id=service_graph_id, fmt=self.fmt))
        instance.get_service_graph.assert_called_with(
            mock.ANY, service_graph_id, fields=mock.ANY
        )
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('service_graph', res)
        self.assertEqual(return_value, res['service_graph'])

    def test_update_service_graph(self):
        # Only name/description are updatable; plugin call must pass the
        # body through unchanged.
        service_graph_id = _uuid()
        tenant_id = _uuid()
        update_data = {'service_graph': {
            'name': 'new_name',
            'description': 'new_desc'
        }}
        return_value = {
            'project_id': tenant_id,
            'id': service_graph_id
        }
        instance = self.plugin.return_value
        instance.update_service_graph.return_value = return_value
        res = self.api.put(_get_path(SERVICE_GRAPH_PATH,
                                     id=service_graph_id, fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_service_graph.assert_called_with(
            mock.ANY, service_graph_id, service_graph=update_data)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('service_graph', res)
        self.assertEqual(return_value, res['service_graph'])

    def test_update_service_graph_with_port_chains(self):
        # API currently disallows rebuilding graphs, so we test this
        service_graph_id = _uuid()
        update_data = {'service_graph': {
            'name': 'new_name',
            'description': 'new_desc',
            'port_chains': {_uuid(): [_uuid()]}
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.put,
            _get_path(SERVICE_GRAPH_PATH,
                      id=service_graph_id, fmt=self.fmt),
            self.serialize(update_data),
            content_type='application/%s' % self.fmt)

    def test_delete_service_graph(self):
        self._test_entity_delete('service_graph')

# --- networking_sfc/tests/unit/extensions/test_tap.py ---
# Copyright (c) 2017 One Convergence Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
from unittest import mock

from networking_sfc.extensions import sfc as sfc_ext
from networking_sfc.extensions import tap as tap_ext
from networking_sfc.tests.unit.extensions import test_sfc as test_sfc_ext

from oslo_utils import uuidutils
from webob import exc

from neutron.tests.unit.api.v2 import test_base as test_api_v2

import webtest

_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path


class SFCTapExtensionTestCase(test_sfc_ext.SfcExtensionTestCase):
    """API tests for the 'tap' attributes added to port pair groups.

    Inherits the full SFC extension test suite and re-registers the SFC
    extension with the tap attributes merged in, so the inherited tests
    run against the extended attribute map as well.
    """

    def setUp(self):
        # Deliberately call the *grandparent* setUp (skipping
        # SfcExtensionTestCase.setUp): this class must merge the tap
        # attributes into the port_pair_groups map before registering
        # the extension itself.
        super(test_sfc_ext.SfcExtensionTestCase, self).setUp()
        attr_map = sfc_ext.RESOURCE_ATTRIBUTE_MAP
        attr_map['port_pair_groups'].update(
            tap_ext.EXTENDED_ATTRIBUTES_2_0['port_pair_groups'])
        self.setup_extension(
            'networking_sfc.extensions.sfc.SfcPluginBase',
            sfc_ext.SFC_EXT,
            sfc_ext.Sfc,
            sfc_ext.SFC_PREFIX[1:],
            plural_mappings={}
        )

    def _get_expected_port_pair_group(self, data):
        # Extend the base expectation with the tap_enabled default (False).
        ret = super(SFCTapExtensionTestCase,
                    self)._get_expected_port_pair_group(data)
        ret['port_pair_group'].update(
            tap_enabled=data['port_pair_group'].get('tap_enabled', False))
        return ret

    def test_create_port_pair_group_with_default_fields(self):
        # Omitting tap_enabled must default it to False in both the
        # plugin call and the response.
        portpairgroup_id = _uuid()
        tenant_id = _uuid()
        data = {'port_pair_group': {
            'tenant_id': tenant_id,
            'project_id': tenant_id,
        }}
        expected_data = self._get_expected_port_pair_group(data)
        return_value = copy.copy(expected_data['port_pair_group'])
        return_value.update({'id': portpairgroup_id})
        instance = self.plugin.return_value
        instance.create_port_pair_group.return_value = return_value
        res = self.api.post(
            _get_path(test_sfc_ext.PORT_PAIR_GROUP_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)
        instance.create_port_pair_group.assert_called_with(
            mock.ANY, port_pair_group=expected_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_pair_group', res)
        self.assertEqual(return_value, res['port_pair_group'])
        self.assertEqual(False, res['port_pair_group']['tap_enabled'])

    def test_create_port_pair_group_with_tap_enabled(self):
        # Explicit tap_enabled=True must round-trip through the API.
        portpairgroup_id = _uuid()
        tenant_id = _uuid()
        data = {'port_pair_group': {
            'description': 'desc',
            'name': 'test1',
            'port_pairs': [],
            'port_pair_group_parameters': {},
            'tap_enabled': True,
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        expected_data = self._get_expected_port_pair_group(data)
        return_value = copy.copy(expected_data['port_pair_group'])
        return_value.update({'id': portpairgroup_id})
        instance = self.plugin.return_value
        instance.create_port_pair_group.return_value = return_value
        res = self.api.post(
            _get_path(test_sfc_ext.PORT_PAIR_GROUP_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)
        instance.create_port_pair_group.assert_called_with(
            mock.ANY, port_pair_group=expected_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertEqual(True, res['port_pair_group']['tap_enabled'])

    def test_create_port_pair_group_invalid_tap_enabled_value(self):
        # A non-boolean tap_enabled must be rejected by validation.
        tenant_id = _uuid()
        data = {'port_pair_group': {
            'port_pairs': [_uuid()],
            'tap_enabled': 'two',
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(test_sfc_ext.PORT_PAIR_GROUP_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_update_port_pair_group_tap_enabled_field(self):
        # tap_enabled is immutable: a PUT carrying it must be rejected.
        portpairgroup_id = _uuid()
        data = {'port_pair_group': {
            'tap_enabled': True
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.put,
            _get_path(test_sfc_ext.PORT_PAIR_GROUP_PATH,
                      id=portpairgroup_id, fmt=self.fmt),
self.serialize(data), content_type='application/%s' % self.fmt)

# --- networking_sfc/tests/unit/extensions/test_sfc.py ---
# Copyright 2017 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
from unittest import mock

from neutron.api.v2 import resource as api_res_log
from neutron import manager
from neutron.notifiers import nova as nova_log
from neutron.tests.unit.api.v2 import test_base as test_api_v2
from neutron.tests.unit.extensions import base as test_api_v2_extension
from oslo_config import cfg
from oslo_utils import uuidutils
from webob import exc
import webtest

from networking_sfc.extensions import sfc as sfc_ext

_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path

# REST collection paths for the three SFC resources.
PORT_CHAIN_PATH = (sfc_ext.SFC_PREFIX[1:] + '/port_chains')
PORT_PAIR_PATH = (sfc_ext.SFC_PREFIX[1:] + '/port_pairs')
PORT_PAIR_GROUP_PATH = (sfc_ext.SFC_PREFIX[1:] + '/port_pair_groups')


class SfcExtensionTestCase(test_api_v2_extension.ExtensionTestCase):
    """API-layer tests for port chains, port pairs and port pair groups.

    The SFC plugin is mocked; tests verify HTTP status codes, response
    bodies, and the exact arguments the API layer passes to the plugin.
    """

    fmt = 'json'  # serialization format for every request/response body

    def setUp(self):
        self._mock_unnecessary_logging()
        super(SfcExtensionTestCase, self).setUp()
        self.setup_extension(
            'networking_sfc.extensions.sfc.SfcPluginBase',
            sfc_ext.SFC_EXT,
            sfc_ext.Sfc,
            sfc_ext.SFC_PREFIX[1:],
            plural_mappings={}
        )

    def _mock_unnecessary_logging(self):
        # Patch out loggers that would otherwise spam the test output.
        mock_log_cfg_p = mock.patch.object(cfg, 'LOG')
        self.mock_log_cfg = mock_log_cfg_p.start()
        mock_log_manager_p = 
mock.patch.object(manager, 'LOG')
        self.mock_log_manager = mock_log_manager_p.start()
        mock_log_nova_p = mock.patch.object(nova_log, 'LOG')
        self.mock_log_nova = mock_log_nova_p.start()
        mock_log_api_res_log_p = mock.patch.object(api_res_log, 'LOG')
        self.mock_log_api_res_log = mock_log_api_res_log_p.start()

    # ------------------------------------------------------------------
    # Port chains
    # ------------------------------------------------------------------
    @staticmethod
    def _get_expected_port_chain(data):
        """Return the port-chain request body after API-layer defaulting.

        Fills in empty name/description/flow_classifiers, chain_id 0 and
        the default chain parameters (mpls correlation, non-symmetric).
        """
        port_chain = data['port_chain']
        chain_params = port_chain.get('chain_parameters') or dict()
        chain_params.setdefault('correlation', 'mpls')
        chain_params.setdefault('symmetric', False)
        ret = {'port_chain': {
            'description': port_chain.get('description') or '',
            'name': port_chain.get('name') or '',
            'port_pair_groups': port_chain['port_pair_groups'],
            'chain_parameters': chain_params,
            'flow_classifiers': port_chain.get(
                'flow_classifiers') or [],
            'tenant_id': port_chain['tenant_id'],
            'project_id': port_chain['project_id'],
            'chain_id': port_chain.get('chain_id') or 0
        }}
        return ret

    def _test_create_port_chain(self, **kwargs):
        # POST a port chain built from defaults plus **kwargs; check the
        # plugin receives the defaulted body and the response echoes it.
        tenant_id = _uuid()
        port_chain_data = {
            'port_pair_groups': [_uuid()],
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }
        port_chain_data.update(kwargs)
        data = {'port_chain': port_chain_data}
        expected_data = self._get_expected_port_chain(data)
        return_value = copy.copy(expected_data['port_chain'])
        return_value.update({'id': _uuid()})
        instance = self.plugin.return_value
        instance.create_port_chain.return_value = return_value
        res = self.api.post(_get_path(PORT_CHAIN_PATH, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_port_chain.assert_called_with(
            mock.ANY, port_chain=expected_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_chain', res)
        self.assertEqual(return_value, res['port_chain'])

    def test_create_port_chain(self):
        self._test_create_port_chain()

    def test_create_port_chain_all_fields(self):
        self._test_create_port_chain(description='desc',
                                     name='test1',
                                     chain_parameters={'symmetric': False,
                                                       'correlation': 'mpls'},
                                     flow_classifiers=[])

    def test_create_port_chain_all_fields_with_symmetric(self):
        self._test_create_port_chain(description='desc',
                                     name='test1',
                                     chain_parameters={'symmetric': True,
                                                       'correlation': 'mpls'},
                                     flow_classifiers=[])

    def test_create_port_chain_none_chain_parameters(self):
        self._test_create_port_chain(chain_parameters=None)

    def test_create_port_chain_empty_chain_parameters(self):
        self._test_create_port_chain(chain_parameters={})

    def test_create_port_chain_multiple_chain_parameters(self):
        self._test_create_port_chain(chain_parameters={
            'correlation': 'mpls', 'symmetric': True
        })

    # Invalid-input cases: the API must reject the request (webtest
    # raises AppError on the error status) before the plugin is called.
    def test_create_port_chain_empty_port_pair_groups(self):
        tenant_id = _uuid()
        data = {'port_chain': {
            'port_pair_groups': [],
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_CHAIN_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_chain_nonuuid_port_pair_groups(self):
        tenant_id = _uuid()
        data = {'port_chain': {
            'port_pair_groups': ['nouuid'],
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_CHAIN_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_chain_nonuuid_flow_classifiers(self):
        tenant_id = _uuid()
        data = {'port_chain': {
            'port_pair_groups': [_uuid()],
            'flow_classifiers': ['nouuid'],
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_CHAIN_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_chain_invalid_chain_parameters(self):
        tenant_id = _uuid()
        data = {'port_chain': {
            'port_pair_groups': [_uuid()],
            'chain_parameters': {'abc': 'def'},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_CHAIN_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_chain_invalid_chain_parameters_correlation(self):
        tenant_id = _uuid()
        data = {'port_chain': {
            'port_pair_groups': [_uuid()],
            'chain_parameters': {'symmetric': False, 'correlation': 'def'},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_CHAIN_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_chain_invalid_chain_parameters_symmetric(self):
        tenant_id = _uuid()
        data = {'port_chain': {
            'port_pair_groups': [_uuid()],
            'chain_parameters': {'symmetric': 'abc', 'correlation': 'mpls'},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_CHAIN_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_port_chain_list(self):
        portchain_id = _uuid()
        tenant_id = _uuid()
        return_value = [{
            'tenant_id': tenant_id,
            'project_id': tenant_id,
            'id': portchain_id
        }]
        instance = self.plugin.return_value
        instance.get_port_chains.return_value = return_value
        res = self.api.get(_get_path(PORT_CHAIN_PATH, fmt=self.fmt))
        instance.get_port_chains.assert_called_with(
            mock.ANY, fields=mock.ANY, filters=mock.ANY
        )
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_chains', res)
        self.assertEqual(return_value, res['port_chains'])

    def test_port_chain_get(self):
        portchain_id = _uuid()
        tenant_id = _uuid()
        return_value = {
            'tenant_id': tenant_id,
            'project_id': tenant_id,
            'id': portchain_id
        }
        instance = self.plugin.return_value
        instance.get_port_chain.return_value = return_value
        res = self.api.get(_get_path(PORT_CHAIN_PATH,
                                     id=portchain_id, fmt=self.fmt))
        instance.get_port_chain.assert_called_with(
            mock.ANY, portchain_id, fields=mock.ANY
        )
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_chain', res)
        self.assertEqual(return_value, res['port_chain'])

    def test_port_chain_update(self):
        portchain_id = _uuid()
        tenant_id = _uuid()
        update_data = {'port_chain': {
            'name': 'new_name',
            'description': 'new_desc',
            'flow_classifiers': [_uuid()],
            'port_pair_groups': [_uuid()]
        }}
        return_value = {
            'tenant_id': tenant_id,
            'project_id': tenant_id,
            'id': portchain_id
        }
        instance = self.plugin.return_value
        instance.update_port_chain.return_value = return_value
        res = self.api.put(_get_path(PORT_CHAIN_PATH,
                                     id=portchain_id, fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_port_chain.assert_called_with(
            mock.ANY, portchain_id, port_chain=update_data)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_chain', res)
        self.assertEqual(return_value, res['port_chain'])

    def test_port_chain_update_nonuuid_flow_classifiers(self):
        portchain_id = _uuid()
        data = {'port_chain': {
            'flow_classifiers': ['nouuid'],
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.put,
            _get_path(PORT_CHAIN_PATH, id=portchain_id, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_port_chain_update_nonuuid_port_pair_groups(self):
        portchain_id = _uuid()
        update_data = {'port_chain': {
            'port_pair_groups': ['nouuid']
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.put,
            _get_path(PORT_CHAIN_PATH, id=portchain_id, fmt=self.fmt),
            self.serialize(update_data),
            content_type='application/%s' % self.fmt
        )

    def test_port_chain_update_chain_parameters(self):
        # chain_parameters is immutable: a PUT carrying it is rejected.
        portchain_id = _uuid()
        update_data = {'port_chain': {
            'chain_parameters': {}
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.put,
            _get_path(PORT_CHAIN_PATH, id=portchain_id, fmt=self.fmt),
            self.serialize(update_data)
        )

    def test_port_chain_delete(self):
        self._test_entity_delete('port_chain')

    # ------------------------------------------------------------------
    # Port pair groups
    # ------------------------------------------------------------------
    def _get_expected_port_pair_group(self, data):
        """Return the port-pair-group body after API-layer defaulting."""
        port_pair_group = data['port_pair_group']
        ret = {'port_pair_group': {
            'description': port_pair_group.get('description') or '',
            'name': port_pair_group.get('name') or '',
            'port_pairs': port_pair_group.get('port_pairs') or [],
            'tenant_id': port_pair_group['tenant_id'],
            'project_id': port_pair_group['project_id'],
            'port_pair_group_parameters': port_pair_group.get(
                'port_pair_group_parameters'
            ) or {'lb_fields': [],
                  'ppg_n_tuple_mapping': {'ingress_n_tuple': {},
                                          'egress_n_tuple': {}}
                  }
        }}
        # group_id is only present when the caller supplied one.
        if port_pair_group.get('group_id'):
            ret['port_pair_group']['group_id'] = port_pair_group['group_id']
        return ret

    def test_create_port_pair_group(self):
        portpairgroup_id = _uuid()
        tenant_id = _uuid()
        data = {'port_pair_group': {
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        expected_data = self._get_expected_port_pair_group(data)
        return_value = copy.copy(expected_data['port_pair_group'])
        return_value.update({'id': portpairgroup_id})
        instance = self.plugin.return_value
        instance.create_port_pair_group.return_value = return_value
        res = self.api.post(
            _get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)
        instance.create_port_pair_group.assert_called_with(
            mock.ANY, port_pair_group=expected_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_pair_group', res)
        self.assertEqual(return_value, res['port_pair_group'])

    def test_create_port_pair_group_all_fields(self):
        portpairgroup_id = _uuid()
        tenant_id = _uuid()
        data = {'port_pair_group': {
            'description': 'desc',
            'name': 'test1',
            'port_pairs': [],
            'port_pair_group_parameters': {},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        expected_data = self._get_expected_port_pair_group(data)
        return_value = copy.copy(expected_data['port_pair_group'])
        return_value.update({'id': portpairgroup_id})
        instance = self.plugin.return_value
        instance.create_port_pair_group.return_value = return_value
        res = self.api.post(
            _get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)
        instance.create_port_pair_group.assert_called_with(
            mock.ANY, port_pair_group=expected_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_pair_group', res)
        self.assertEqual(return_value, res['port_pair_group'])

    def test_create_port_pair_group_none_parameters(self):
        # None parameters must be replaced by the documented defaults.
        portpairgroup_id = _uuid()
        tenant_id = _uuid()
        data = {'port_pair_group': {
            'port_pairs': [_uuid()],
            'port_pair_group_parameters': None,
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        expected_data = self._get_expected_port_pair_group(data)
        return_value = copy.copy(expected_data['port_pair_group'])
        return_value.update({'id': portpairgroup_id})
        instance = self.plugin.return_value
        instance.create_port_pair_group.return_value = return_value
        res = self.api.post(_get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_port_pair_group.assert_called_with(
            mock.ANY, port_pair_group=expected_data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('port_pair_group', res)
        self.assertEqual(return_value, res['port_pair_group'])

    def test_create_port_pair_group_empty_parameters(self):
        portpairgroup_id = _uuid()
        tenant_id = _uuid()
        data = {'port_pair_group': {
            'port_pairs': [_uuid()],
            'port_pair_group_parameters': {},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        expected_data = self._get_expected_port_pair_group(data)
        return_value = copy.copy(expected_data['port_pair_group'])
        return_value.update({'id': portpairgroup_id})
        instance = self.plugin.return_value
        instance.create_port_pair_group.return_value = return_value
        res = self.api.post(_get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_port_pair_group.assert_called_with(
            mock.ANY, port_pair_group=expected_data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('port_pair_group', res)
        self.assertEqual(return_value, res['port_pair_group'])

    def test_create_port_pair_group_invalid_parameters(self):
        tenant_id = _uuid()
        data = {'port_pair_group': {
            'port_pairs': [_uuid()],
            'port_pair_group_parameters': {'abc': 'def'},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_pair_group_invalid_lb_fields_type(self):
        # lb_fields must be a list, not a bare string.
        tenant_id = _uuid()
        data = {'port_pair_group': {
            'port_pairs': [_uuid()],
            'port_pair_group_parameters': {'lb_fields': 'ip_src'},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_pair_group_invalid_lb_fields(self):
        tenant_id = _uuid()
        data = {'port_pair_group': {
            'port_pairs': [_uuid()],
            'port_pair_group_parameters': {'lb_fields': ['def']},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_pair_group_invalid_ppg_n_tuple_mapping_key(self):
        # Misspelled n-tuple keys must be rejected by validation.
        tenant_id = _uuid()
        data = {'port_pair_group': {
            'port_pairs': [_uuid()],
            'port_pair_group_parameters': {
                'ppg_n_tuple_mapping': {
                    'ingress_n_tuple': {'sssource_ip_prefix': None},
                    'egress_n_tuple': {'protool': None}
                }
            },
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_pair_group_nonuuid_port_pairs(self):
        tenant_id = _uuid()
        data = {'port_pair_group': {
            'port_pairs': ['nouuid'],
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_port_pair_group_list(self):
        portpairgroup_id = _uuid()
        tenant_id = _uuid()
        return_value = [{
            'tenant_id': tenant_id,
            'project_id': tenant_id,
            'id': portpairgroup_id
        }]
        instance = self.plugin.return_value
        instance.get_port_pair_groups.return_value = return_value
        res = self.api.get(
            _get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt))
        instance.get_port_pair_groups.assert_called_with(
            mock.ANY, fields=mock.ANY, filters=mock.ANY
        )
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_pair_groups', res)
        self.assertEqual(return_value, res['port_pair_groups'])

    def test_port_pair_group_get(self):
        portpairgroup_id = _uuid()
        tenant_id = _uuid()
        return_value = {
            'tenant_id': tenant_id,
            'project_id': tenant_id,
            'id': portpairgroup_id
        }
        instance = self.plugin.return_value
        instance.get_port_pair_group.return_value = return_value
        res = self.api.get(_get_path(PORT_PAIR_GROUP_PATH,
                                     id=portpairgroup_id, fmt=self.fmt))
        instance.get_port_pair_group.assert_called_with(
            mock.ANY, portpairgroup_id, fields=mock.ANY
        )
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_pair_group', res)
        self.assertEqual(return_value, res['port_pair_group'])

    def test_port_pair_group_update(self):
        portpairgroup_id = _uuid()
        tenant_id = _uuid()
        update_data = {'port_pair_group': {
            'name': 'new_name',
            'description': 'new_desc',
            'port_pairs': [_uuid()]
        }}
        return_value = {
            'tenant_id': tenant_id,
            'project_id': tenant_id,
            'id': portpairgroup_id
        }
        instance = self.plugin.return_value
        instance.update_port_pair_group.return_value = return_value
        res = self.api.put(
            _get_path(
                PORT_PAIR_GROUP_PATH, id=portpairgroup_id, fmt=self.fmt),
            self.serialize(update_data))
        instance.update_port_pair_group.assert_called_with(
            mock.ANY, portpairgroup_id, port_pair_group=update_data)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_pair_group', res)
        self.assertEqual(return_value, res['port_pair_group'])

    def test_port_pair_group_update_nonuuid_port_pairs(self):
        portpairgroup_id = _uuid()
        data = {'port_pair_group': {
            'port_pairs': ['nouuid']
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.put,
            _get_path(PORT_PAIR_GROUP_PATH,
                      id=portpairgroup_id, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_port_pair_group_delete(self):
        self._test_entity_delete('port_pair_group')

    # ------------------------------------------------------------------
    # Port pairs
    # ------------------------------------------------------------------
    def _get_expected_port_pair(self, data):
        """Return the port-pair request body after API-layer defaulting."""
        return {'port_pair': {
            'name': data['port_pair'].get('name') or '',
            'description': data['port_pair'].get('description') or '',
            'ingress': data['port_pair']['ingress'],
            'egress': data['port_pair']['egress'],
            'service_function_parameters': data['port_pair'].get(
                'service_function_parameters') or {
                    'correlation': None, 'weight': 1},
            'tenant_id': data['port_pair']['tenant_id'],
            'project_id': data['port_pair']['project_id']
        }}

    def test_create_port_pair(self):
        portpair_id = _uuid()
        tenant_id = _uuid()
        data = {'port_pair': {
            'ingress': _uuid(),
            'egress': _uuid(),
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        expected_data = self._get_expected_port_pair(data)
        return_value = copy.copy(expected_data['port_pair'])
        return_value.update({'id': portpair_id})
        instance = self.plugin.return_value
        instance.create_port_pair.return_value = return_value
        res = self.api.post(_get_path(PORT_PAIR_PATH, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_port_pair.assert_called_with(
            mock.ANY, port_pair=expected_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_pair', res)
        self.assertEqual(return_value, res['port_pair'])

    def test_create_port_pair_all_fields(self):
        portpair_id = _uuid()
        tenant_id = _uuid()
        data = {'port_pair': {
            'description': 'desc',
            'name': 'test1',
            'ingress': _uuid(),
            'egress': _uuid(),
            'service_function_parameters': {
                'correlation': None, 'weight': 2},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        expected_data = self._get_expected_port_pair(data)
        return_value = copy.copy(expected_data['port_pair'])
        return_value.update({'id': portpair_id})
        instance = self.plugin.return_value
        instance.create_port_pair.return_value = return_value
        res = self.api.post(_get_path(PORT_PAIR_PATH, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_port_pair.assert_called_with(
            mock.ANY, port_pair=expected_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_pair', res)
        self.assertEqual(return_value, res['port_pair'])

    def test_create_port_pair_non_service_function_parameters(self):
        # None parameters must be replaced by the defaults
        # (correlation None, weight 1).
        portpair_id = _uuid()
        tenant_id = _uuid()
        data = {'port_pair': {
            'ingress': _uuid(),
            'egress': _uuid(),
            'service_function_parameters': None,
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        expected_data = self._get_expected_port_pair(data)
        return_value = copy.copy(expected_data['port_pair'])
        return_value.update({'id': portpair_id})
        instance = self.plugin.return_value
        instance.create_port_pair.return_value = return_value
        res = self.api.post(_get_path(PORT_PAIR_PATH, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_port_pair.assert_called_with(
            mock.ANY, port_pair=expected_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_pair', res)
        self.assertEqual(return_value, res['port_pair'])

    def test_create_port_pair_empty_service_function_parameters(self):
        portpair_id = _uuid()
        tenant_id = _uuid()
        data = {'port_pair': {
            'ingress': _uuid(),
            'egress': _uuid(),
            'service_function_parameters': {},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        expected_data = self._get_expected_port_pair(data)
        return_value = copy.copy(expected_data['port_pair'])
        return_value.update({'id': portpair_id})
        instance = self.plugin.return_value
        instance.create_port_pair.return_value = return_value
        res = self.api.post(_get_path(PORT_PAIR_PATH, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_port_pair.assert_called_with(
            mock.ANY, port_pair=expected_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_pair', res)
        self.assertEqual(return_value, res['port_pair'])

    def test_create_port_pair_invalid_service_function_parameters(self):
        tenant_id = _uuid()
        data = {'port_pair': {
            'ingress': _uuid(),
            'egress': _uuid(),
            'service_function_parameters': {'abc': 'def'},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_PAIR_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_pair_invalid_correlation(self):
        tenant_id = _uuid()
        data = {'port_pair': {
            'ingress': _uuid(),
            'egress': _uuid(),
            'service_function_parameters': {'correlation': 'def'},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_PAIR_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_pair_invalid_weight_type(self):
        tenant_id = _uuid()
        data = {'port_pair': {
            'ingress': _uuid(),
            'egress': _uuid(),
            'service_function_parameters': {'weight': 'abc'},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_PAIR_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_pair_invalid_weight(self):
        # Negative weight is out of range.
        tenant_id = _uuid()
        data = {'port_pair': {
            'ingress': _uuid(),
            'egress': _uuid(),
            'service_function_parameters': {'weight': -1},
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_PAIR_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_pair_nouuid_ingress(self):
        tenant_id = _uuid()
        data = {'port_pair': {
            'ingress': 'abc',
            'egress': _uuid(),
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_PAIR_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_create_port_pair_nouuid_egress(self):
        tenant_id = _uuid()
        data = {'port_pair': {
            'egress': 'abc',
            'ingress': _uuid(),
            'tenant_id': tenant_id,
            'project_id': tenant_id
        }}
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(PORT_PAIR_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)

    def test_port_pair_list(self):
        portpair_id = _uuid()
        tenant_id = _uuid()
        return_value = [{
            'tenant_id': tenant_id,
            'project_id': tenant_id,
            'id': portpair_id
        }]
        instance = self.plugin.return_value
        instance.get_port_pairs.return_value = return_value
        res = self.api.get(_get_path(PORT_PAIR_PATH, fmt=self.fmt))
        instance.get_port_pairs.assert_called_with(
            mock.ANY, fields=mock.ANY, filters=mock.ANY
        )
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_pairs', res)
        self.assertEqual(return_value, res['port_pairs'])

    def test_port_pair_get(self):
        portpair_id = _uuid()
        tenant_id = _uuid()
        return_value = {
            'tenant_id': tenant_id,
            'project_id': tenant_id,
            'id': portpair_id
        }
        instance = self.plugin.return_value
        instance.get_port_pair.return_value = return_value
        res = self.api.get(_get_path(PORT_PAIR_PATH,
                                     id=portpair_id, fmt=self.fmt))
        instance.get_port_pair.assert_called_with(
            mock.ANY, portpair_id, fields=mock.ANY
        )
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_pair', res)
        self.assertEqual(return_value, res['port_pair'])

    def test_port_pair_update(self):
        # NOTE: method continues beyond this chunk of the archive.
        portpair_id = _uuid()
tenant_id = _uuid() update_data = {'port_pair': { 'name': 'new_name', 'description': 'new_desc' }} return_value = { 'tenant_id': tenant_id, 'project_id': tenant_id, 'id': portpair_id } instance = self.plugin.return_value instance.update_port_pair.return_value = return_value res = self.api.put(_get_path(PORT_PAIR_PATH, id=portpair_id, fmt=self.fmt), self.serialize(update_data)) instance.update_port_pair.assert_called_with( mock.ANY, portpair_id, port_pair=update_data) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('port_pair', res) self.assertEqual(return_value, res['port_pair']) def test_port_pair_update_service_function_parameters(self): portpair_id = _uuid() data = {'port_pair': { 'service_function_parameters': None }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(PORT_PAIR_PATH, id=portpair_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_port_pair_update_ingress(self): portpair_id = _uuid() data = {'port_pair': { 'ingress': _uuid() }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(PORT_PAIR_PATH, id=portpair_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_port_pair_update_egress(self): portpair_id = _uuid() data = {'port_pair': { 'egress': _uuid() }} self.assertRaises( webtest.app.AppError, self.api.put, _get_path(PORT_PAIR_PATH, id=portpair_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) def test_port_pair_delete(self): self._test_entity_delete('port_pair') # NOTE(scsnow): move to neutron-lib def test_validate_list_of_allowed_values(self): data = ['eth_src', 'eth_src', 'illegal'] allowed_values = ['eth_src', 'eth_src'] msg = sfc_ext.validate_list_of_allowed_values(data, allowed_values) self.assertIn("Illegal values in a list:", msg) networking-sfc-10.0.0/networking_sfc/tests/unit/db/0000775000175000017500000000000013656750461022317 5ustar 
# Copyright 2017 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib
import logging
from unittest import mock

from neutron.api import extensions as api_ext
from neutron.common import config
import neutron.extensions as nextensions
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import uuidutils
import webob.exc

from networking_sfc.db import flowclassifier_db as fdb
from networking_sfc.db import sfc_db
from networking_sfc import extensions
from networking_sfc.extensions import flowclassifier as fc_ext
from networking_sfc.extensions import servicegraph as sg_ext
from networking_sfc.extensions import sfc
from networking_sfc.extensions import tap as tap_ext
from networking_sfc.tests import base
from networking_sfc.tests.unit.db import test_flowclassifier_db

DB_SFC_PLUGIN_CLASS = (
    "networking_sfc.db.sfc_db.SfcDbPlugin"
)

extensions_path = ':'.join(extensions.__path__ + nextensions.__path__)


class SfcDbPluginTestCaseBase(
    base.BaseTestCase
):
    """Shared helpers for the SFC DB-plugin test cases.

    Provides REST create helpers and context managers that create (and
    clean up) port pairs, port pair groups, port chains and service
    graphs, plus builders for the attribute dicts the API is expected to
    return for each resource.
    """

    def _assert_port_chain_equal(self, res_port_chain, expected):
        """Assert *res_port_chain* matches *expected*.

        List-valued attributes (e.g. flow classifiers) are compared
        without regard to ordering.
        """
        # Flow classifiers are stored in a list, only check items for them
        for k, v in expected.items():
            if isinstance(v, list):
                # assertCountEqual is the Python 3 spelling of the removed
                # Python 2 assertItemsEqual: same elements, any order.
                self.assertCountEqual(res_port_chain[k], v)
            else:
                self.assertEqual(res_port_chain[k], v)

    def _create_port_chain(
        self, fmt, port_chain=None, expected_res_status=None, **kwargs
    ):
        """POST a port chain and return the raw response.

        If *expected_res_status* is given, assert the HTTP status first.
        """
        ctx = kwargs.get('context', None)
        tenant_id = kwargs.get('tenant_id', self._tenant_id)
        data = {'port_chain': port_chain or {}}
        if ctx is None:
            # Only inject a tenant when no explicit context is supplied.
            data['port_chain'].update({'tenant_id': tenant_id})
        req = self.new_create_request(
            'port_chains', data, fmt, context=ctx
        )
        res = req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(expected_res_status, res.status_int)
        return res

    @contextlib.contextmanager
    def port_chain(self, fmt=None, port_chain=None, do_delete=True, **kwargs):
        """Yield a freshly created port chain; delete it on normal exit."""
        if not fmt:
            fmt = self.fmt
        res = self._create_port_chain(fmt, port_chain, **kwargs)
        if res.status_int >= 400:
            logging.error('create port chain result: %s', res)
            raise webob.exc.HTTPClientError(code=res.status_int)
        port_chain = self.deserialize(fmt or self.fmt, res)
        yield port_chain
        if do_delete:
            self._delete('port_chains', port_chain['port_chain']['id'])

    def _create_port_pair_group(
        self, fmt, port_pair_group=None, expected_res_status=None, **kwargs
    ):
        """POST a port pair group and return the raw response."""
        ctx = kwargs.get('context', None)
        tenant_id = kwargs.get('tenant_id', self._tenant_id)
        data = {'port_pair_group': port_pair_group or {}}
        if ctx is None:
            data['port_pair_group'].update({'tenant_id': tenant_id})
        req = self.new_create_request(
            'port_pair_groups', data, fmt, context=ctx
        )
        res = req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(expected_res_status, res.status_int)
        return res

    @contextlib.contextmanager
    def port_pair_group(
        self, fmt=None, port_pair_group=None, do_delete=True, **kwargs
    ):
        """Yield a freshly created port pair group; delete on normal exit."""
        if not fmt:
            fmt = self.fmt
        res = self._create_port_pair_group(fmt, port_pair_group, **kwargs)
        if res.status_int >= 400:
            logging.error('create port pair group result: %s', res)
            raise webob.exc.HTTPClientError(code=res.status_int)
        port_pair_group = self.deserialize(fmt or self.fmt, res)
        yield port_pair_group
        if do_delete:
            self._delete(
                'port_pair_groups',
                port_pair_group['port_pair_group']['id'])

    def _create_port_pair(
        self, fmt, port_pair=None, expected_res_status=None, **kwargs
    ):
        """POST a port pair and return the raw response."""
        ctx = kwargs.get('context', None)
        tenant_id = kwargs.get('tenant_id', self._tenant_id)
        data = {'port_pair': port_pair or {}}
        if ctx is None:
            data['port_pair'].update({'tenant_id': tenant_id})
        req = self.new_create_request(
            'port_pairs', data, fmt, context=ctx
        )
        res = req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(expected_res_status, res.status_int)
        return res

    @contextlib.contextmanager
    def port_pair(self, fmt=None, port_pair=None, do_delete=True, **kwargs):
        """Yield a freshly created port pair; delete it on normal exit."""
        if not fmt:
            fmt = self.fmt
        res = self._create_port_pair(fmt, port_pair, **kwargs)
        if res.status_int >= 400:
            logging.error('create port pair result: %s', res)
            raise webob.exc.HTTPClientError(code=res.status_int)
        port_pair = self.deserialize(fmt or self.fmt, res)
        yield port_pair
        if do_delete:
            self._delete('port_pairs', port_pair['port_pair']['id'])

    def _create_service_graph(
        self, fmt, service_graph=None, expected_res_status=None, **kwargs
    ):
        """POST a service graph and return the raw response.

        Note: unlike the other helpers this one injects ``project_id``
        (not ``tenant_id``) when no context is supplied.
        """
        ctx = kwargs.get('context', None)
        project_id = kwargs.get('project_id', self._tenant_id)
        data = {'service_graph': service_graph or {}}
        if ctx is None:
            data['service_graph'].update({'project_id': project_id})
        req = self.new_create_request(
            'service_graphs', data, fmt, context=ctx
        )
        res = req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(expected_res_status, res.status_int)
        return res

    @contextlib.contextmanager
    def service_graph(self, fmt=None, service_graph=None,
                      do_delete=True, **kwargs):
        """Yield a freshly created service graph; delete on normal exit."""
        if not fmt:
            fmt = self.fmt
        res = self._create_service_graph(fmt, service_graph, **kwargs)
        if res.status_int >= 400:
            logging.error('create Service Graph result: %s', res)
            raise webob.exc.HTTPClientError(code=res.status_int)
        service_graph = self.deserialize(fmt or self.fmt, res)
        yield service_graph
        if do_delete:
            self._delete('service_graphs', service_graph[
                'service_graph']['id'])

    def _get_expected_port_pair(self, port_pair):
        """Attribute dict the API should return for *port_pair*."""
        return {
            'name': port_pair.get('name') or '',
            'description': port_pair.get('description') or '',
            'egress': port_pair.get('egress'),
            'ingress': port_pair.get('ingress'),
            'service_function_parameters': port_pair.get(
                'service_function_parameters') or {
                'correlation': None, 'weight': 1
            }
        }

    def _test_create_port_pair(self, port_pair, expected_port_pair=None):
        """Create *port_pair* and check every expected attribute."""
        if expected_port_pair is None:
            expected_port_pair = self._get_expected_port_pair(port_pair)
        with self.port_pair(port_pair=port_pair) as pp:
            for k, v in expected_port_pair.items():
                self.assertEqual(pp['port_pair'][k], v)

    def _test_create_port_pairs(
        self, port_pairs, expected_port_pairs=None
    ):
        # NOTE(review): as written this pops and exercises only the last
        # element of *port_pairs*; nested creation of all elements
        # (e.g. via recursion inside the `with`) may have been intended
        # -- confirm before relying on multi-element behavior.
        if port_pairs:
            port_pair = port_pairs.pop()
            if expected_port_pairs:
                expected_port_pair = expected_port_pairs.pop()
            else:
                expected_port_pair = self._get_expected_port_pair(port_pair)
            with self.port_pair(port_pair=port_pair) as pp:
                for k, v in expected_port_pair.items():
                    self.assertEqual(pp['port_pair'][k], v)

    def _get_expected_port_pair_group(self, port_pair_group):
        """Attribute dict the API should return for *port_pair_group*."""
        ret = {
            'name': port_pair_group.get('name') or '',
            'description': port_pair_group.get('description') or '',
            'port_pairs': port_pair_group.get('port_pairs') or [],
            'port_pair_group_parameters': port_pair_group.get(
                'port_pair_group_parameters'
            ) or {'lb_fields': [],
                  'ppg_n_tuple_mapping': {'ingress_n_tuple': {},
                                          'egress_n_tuple': {}}}
        }
        if port_pair_group.get('group_id'):
            ret['group_id'] = port_pair_group['group_id']
        return ret

    def _test_create_port_pair_group(
        self, port_pair_group, expected_port_pair_group=None
    ):
        """Create *port_pair_group* and check every expected attribute."""
        if expected_port_pair_group is None:
            expected_port_pair_group = self._get_expected_port_pair_group(
                port_pair_group)
        with self.port_pair_group(port_pair_group=port_pair_group) as pg:
            for k, v in expected_port_pair_group.items():
                self.assertEqual(pg['port_pair_group'][k], v)

    def _test_create_port_pair_groups(
        self, port_pair_groups, expected_port_pair_groups=None
    ):
        # NOTE(review): only the last element is exercised -- see
        # _test_create_port_pairs.
        if port_pair_groups:
            port_pair_group = port_pair_groups.pop()
            if expected_port_pair_groups:
                expected_port_pair_group = expected_port_pair_groups.pop()
            else:
                expected_port_pair_group = self._get_expected_port_pair_group(
                    port_pair_group)
            with self.port_pair_group(port_pair_group=port_pair_group) as pg:
                for k, v in expected_port_pair_group.items():
                    self.assertEqual(pg['port_pair_group'][k], v)

    @staticmethod
    def _get_expected_port_chain(port_chain):
        """Attribute dict the API should return for *port_chain*."""
        chain_params = port_chain.get('chain_parameters') or dict()
        chain_params.setdefault('correlation', 'mpls')
        chain_params.setdefault('symmetric', False)
        ret = {
            'name': port_chain.get('name') or '',
            'description': port_chain.get('description') or '',
            'port_pair_groups': port_chain['port_pair_groups'],
            'flow_classifiers': port_chain.get('flow_classifiers') or [],
            'chain_parameters': chain_params
        }
        if port_chain.get('chain_id'):
            ret['chain_id'] = port_chain['chain_id']
        return ret

    def _test_create_port_chain(self, port_chain, expected_port_chain=None):
        """Create *port_chain* and check every expected attribute."""
        if expected_port_chain is None:
            expected_port_chain = self._get_expected_port_chain(port_chain)
        with self.port_chain(port_chain=port_chain) as pc:
            for k, v in expected_port_chain.items():
                self.assertEqual(pc['port_chain'][k], v)

    def _test_create_port_chains(
        self, port_chains, expected_port_chains=None
    ):
        # NOTE(review): only the last element is exercised -- see
        # _test_create_port_pairs.
        if port_chains:
            port_chain = port_chains.pop()
            if expected_port_chains:
                expected_port_chain = expected_port_chains.pop()
            else:
                expected_port_chain = self._get_expected_port_chain(
                    port_chain)
            with self.port_chain(port_chain=port_chain) as pc:
                for k, v in expected_port_chain.items():
                    self.assertEqual(pc['port_chain'][k], v)

    @staticmethod
    def _get_expected_graph(service_graph):
        """Attribute dict the API should return for *service_graph*."""
        ret = {
            'name': service_graph.get('name') or '',
            'description': service_graph.get('description') or '',
            'port_chains': service_graph.get('port_chains')
        }
        return ret

    def _test_create_service_graph(self, service_graph, expected_graph=None):
        """Create *service_graph* and check every expected attribute."""
        if expected_graph is None:
            expected_graph = self._get_expected_graph(service_graph)
        with self.service_graph(service_graph=service_graph) as graph:
            for k, v in expected_graph.items():
                self.assertEqual(graph['service_graph'][k], v)


class SfcDbPluginTestCase(
    base.NeutronDbPluginV2TestCase,
    test_flowclassifier_db.FlowClassifierDbPluginTestCaseBase,
    SfcDbPluginTestCaseBase
):
    # Map every SFC/flow-classifier/service-graph collection to its
    # API path prefix so the base test machinery builds correct URLs.
    resource_prefix_map = dict([
        (k, sfc.SFC_PREFIX)
        for k in sfc.RESOURCE_ATTRIBUTE_MAP.keys()
    ] + [
        (k, fc_ext.FLOW_CLASSIFIER_PREFIX)
        for k in fc_ext.RESOURCE_ATTRIBUTE_MAP.keys()
    ] + [
        (k, sg_ext.SG_PREFIX)
        for k in sg_ext.RESOURCE_ATTRIBUTE_MAP.keys()
    ])

    def setUp(self, core_plugin=None, sfc_plugin=None,
              flowclassifier_plugin=None, ext_mgr=None):
        """Wire up the SFC and flow-classifier service plugins for tests."""
        mock_log_p = mock.patch.object(sfc_db, 'LOG')
        self.mock_log = mock_log_p.start()
        cfg.CONF.register_opts(sfc.sfc_quota_opts, 'QUOTAS')
        if not sfc_plugin:
            sfc_plugin = DB_SFC_PLUGIN_CLASS
        if not flowclassifier_plugin:
            flowclassifier_plugin = (
                test_flowclassifier_db.DB_FLOWCLASSIFIER_PLUGIN_CLASS)
        service_plugins = {
            sfc.SFC_EXT: sfc_plugin,
            fc_ext.FLOW_CLASSIFIER_EXT: flowclassifier_plugin
        }
        sfc_db.SfcDbPlugin.supported_extension_aliases = [
            sfc.SFC_EXT, sg_ext.SG_EXT, tap_ext.TAP_EXT]
        sfc_db.SfcDbPlugin.path_prefix = sfc.SFC_PREFIX
        fdb.FlowClassifierDbPlugin.supported_extension_aliases = [
            fc_ext.FLOW_CLASSIFIER_EXT]
        fdb.FlowClassifierDbPlugin.path_prefix = (
            fc_ext.FLOW_CLASSIFIER_PREFIX
        )
        super(SfcDbPluginTestCase, self).setUp(
            ext_mgr=ext_mgr,
            plugin=core_plugin,
            service_plugins=service_plugins
        )
        if not ext_mgr:
            self.sfc_plugin = importutils.import_object(sfc_plugin)
            self.flowclassifier_plugin = importutils.import_object(
                flowclassifier_plugin)
            # Note (vks1): Auto-load extensions.
ext_mgr = api_ext.PluginAwareExtensionManager.get_instance() app = config.load_paste_app('extensions_test_app') self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr) def test_create_port_chain(self): with self.port_pair_group(port_pair_group={}) as pg: self._test_create_port_chain({ 'port_pair_groups': [pg['port_pair_group']['id']]}) def test_quota_create_port_chain(self): cfg.CONF.set_override('quota_port_chain', 3, group='QUOTAS') with self.port_pair_group( port_pair_group={}, do_delete=False ) as pg1, self.port_pair_group( port_pair_group={}, do_delete=False ) as pg2, self.port_pair_group( port_pair_group={}, do_delete=False ) as pg3, self.port_pair_group( port_pair_group={}, do_delete=False ) as pg4: self._create_port_chain( self.fmt, { 'port_pair_groups': [pg1['port_pair_group']['id']] }, expected_res_status=201) self._create_port_chain( self.fmt, { 'port_pair_groups': [pg2['port_pair_group']['id']] }, expected_res_status=201) self._create_port_chain( self.fmt, { 'port_pair_groups': [pg3['port_pair_group']['id']] }, expected_res_status=201) self._create_port_chain( self.fmt, { 'port_pair_groups': [pg4['port_pair_group']['id']] }, expected_res_status=409) def test_create_port_chain_all_fields(self): with self.port_pair_group(port_pair_group={}) as pg: self._test_create_port_chain({ 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [], 'name': 'abc', 'description': 'def', 'chain_parameters': {'symmetric': False, 'correlation': 'mpls'} }) def test_create_port_chain_all_fields_with_chain_id(self): with self.port_pair_group(port_pair_group={}) as pg: self._test_create_port_chain({ 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [], 'name': 'abc', 'description': 'def', 'chain_parameters': {'symmetric': False, 'correlation': 'mpls'}, 'chain_id': 99 }) def test_create_port_chain_all_fields_with_symmetric(self): with self.port_pair_group(port_pair_group={}) as pg: self._test_create_port_chain({ 'port_pair_groups': 
[pg['port_pair_group']['id']], 'flow_classifiers': [], 'name': 'abc', 'description': 'def', 'chain_parameters': {'symmetric': True, 'correlation': 'mpls'} }) def test_create_port_chain_multi_port_pair_groups(self): with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: self._test_create_port_chain({ 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'] ] }) def test_create_port_chain_shared_port_pair_groups(self): with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2, self.port_pair_group( port_pair_group={} ) as pg3: self._test_create_port_chains([{ 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'] ] }, { 'port_pair_groups': [ pg1['port_pair_group']['id'], pg3['port_pair_group']['id'] ] }]) def test_create_port_chain_shared_port_pair_groups_different_order(self): with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: self._test_create_port_chains([{ 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'] ] }, { 'port_pair_groups': [ pg2['port_pair_group']['id'], pg1['port_pair_group']['id'] ] }]) def test_create_port_chain_with_empty_chain_parameters(self): with self.port_pair_group(port_pair_group={}) as pg: self._test_create_port_chain({ 'chain_parameters': {}, 'port_pair_groups': [pg['port_pair_group']['id']] }) def test_create_port_chain_with_none_chain_parameters(self): with self.port_pair_group(port_pair_group={}) as pg: self._test_create_port_chain({ 'chain_parameters': None, 'port_pair_groups': [pg['port_pair_group']['id']] }) def test_create_port_chain_with_default_chain_parameters(self): with self.port_pair_group(port_pair_group={}) as pg: self._test_create_port_chain({ 'chain_parameters': {'symmetric': False, 'correlation': 'mpls'}, 'port_pair_groups': [pg['port_pair_group']['id']] }) def 
test_create_port_chain_with_nsh_correlation(self): with self.port_pair_group(port_pair_group={}) as pg: self._test_create_port_chain({ 'chain_parameters': {'symmetric': False, 'correlation': 'nsh'}, 'port_pair_groups': [pg['port_pair_group']['id']] }) def test_create_port_chain_with_nsh_correlation_incompatible_ppg_fail( self): with self.port( name='port1', device_id='default' ) as port1, self.port( name='port2', device_id='default' ) as port2: with self.port_pair(port_pair={ 'ingress': port1['port']['id'], 'egress': port1['port']['id'], 'service_function_parameters': {'correlation': 'nsh'} }) as pp1, self.port_pair(port_pair={ 'ingress': port2['port']['id'], 'egress': port2['port']['id'], 'service_function_parameters': {'correlation': 'mpls'} }) as pp2: with self.port_pair_group(port_pair_group={ 'port_pairs': [ pp1['port_pair']['id'] ] }) as ppg1, self.port_pair_group(port_pair_group={ 'port_pairs': [ pp2['port_pair']['id'] ] }) as ppg2: self._create_port_chain( self.fmt, { 'chain_parameters': {'symmetric': False, 'correlation': 'nsh'}, 'port_pair_groups': [ ppg1['port_pair_group']['id'], ppg2['port_pair_group']['id']], }, expected_res_status=400) def test_create_port_chains_with_conflicting_chain_ids(self): with self.port_pair_group( port_pair_group={}, do_delete=False ) as pg1, self.port_pair_group( port_pair_group={}, do_delete=False ) as pg2: self._create_port_chain( self.fmt, { 'port_pair_groups': [pg1['port_pair_group']['id']], 'chain_id': 88 }, expected_res_status=201) self._create_port_chain( self.fmt, { 'port_pair_groups': [pg2['port_pair_group']['id']], 'chain_id': 88 }, expected_res_status=400 ) def test_create_port_chain_with_none_flow_classifiers(self): with self.port_pair_group(port_pair_group={}) as pg: self._test_create_port_chain({ 'flow_classifiers': None, 'port_pair_groups': [pg['port_pair_group']['id']] }) def test_create_port_chain_with_empty_flow_classifiers(self): with self.port_pair_group(port_pair_group={}) as pg: 
self._test_create_port_chain({ 'flow_classifiers': [], 'port_pair_groups': [pg['port_pair_group']['id']] }) def test_create_port_chain_with_flow_classifiers(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'logical_source_port': port['port']['id'] }) as fc: with self.port_pair_group(port_pair_group={}) as pg: self._test_create_port_chain({ 'flow_classifiers': [fc['flow_classifier']['id']], 'port_pair_groups': [pg['port_pair_group']['id']] }) def test_create_port_chain_with_multi_flow_classifiers(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port['port']['id'] }) as fc1, self.flow_classifier(flow_classifier={ 'source_ip_prefix': '192.168.101.0/24', 'logical_source_port': port['port']['id'] }) as fc2: with self.port_pair_group(port_pair_group={}) as pg: self._test_create_port_chain({ 'flow_classifiers': [ fc1['flow_classifier']['id'], fc2['flow_classifier']['id'] ], 'port_pair_groups': [pg['port_pair_group']['id']] }) def test_create_port_chain_with_flow_classifiers_basic_the_same(self): with self.port( name='test1' ) as port1, self.port( name='test2' ) as port2: with self.flow_classifier(flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port1['port']['id'] }) as fc1, self.flow_classifier(flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port2['port']['id'] }) as fc2: with self.port_pair_group(port_pair_group={}) as pg: self._test_create_port_chain({ 'flow_classifiers': [ fc1['flow_classifier']['id'], fc2['flow_classifier']['id'] ], 'port_pair_groups': [pg['port_pair_group']['id']] }) def test_create_multi_port_chain_with_flow_classifiers(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port['port']['id'] }) as fc1, self.flow_classifier(flow_classifier={ 
'source_ip_prefix': '192.168.101.0/24', 'logical_source_port': port['port']['id'] }) as fc2: with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain( port_chain={ 'flow_classifiers': [ fc1['flow_classifier']['id'] ], 'port_pair_groups': [ pg1['port_pair_group']['id'] ] } ): self._test_create_port_chain({ 'flow_classifiers': [ fc2['flow_classifier']['id'] ], 'port_pair_groups': [pg2['port_pair_group']['id']] }) def test_create_multi_port_chain_with_conflict_flow_classifiers(self): with self.port( name='test1' ) as port1, self.port( name='test2' ) as port2: with self.flow_classifier(flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port1['port']['id'] }) as fc1, self.flow_classifier(flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port2['port']['id'] }) as fc2: with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain( port_chain={ 'flow_classifiers': [ fc1['flow_classifier']['id'] ], 'port_pair_groups': [ pg1['port_pair_group']['id'] ] } ): self._create_port_chain( self.fmt, { 'flow_classifiers': [ fc2['flow_classifier']['id'] ], 'port_pair_groups': [ pg2['port_pair_group']['id'] ] }, expected_res_status=400 ) def test_create_multi_port_chain_with_same_flow_classifier(self): with self.port( name='test1' ) as port1: with self.flow_classifier(flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port1['port']['id'] }) as fc: with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain( port_chain={ 'flow_classifiers': [ fc['flow_classifier']['id'] ], 'port_pair_groups': [ pg1['port_pair_group']['id'] ] } ): self._create_port_chain( self.fmt, { 'flow_classifiers': [ fc['flow_classifier']['id'] ], 'port_pair_groups': [ pg2['port_pair_group']['id'] ] }, expected_res_status=409 
) def test_create_port_chain_with_port_pairs(self): with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: with self.port_pair(port_pair={ 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': dst_port['port']['id'], 'egress': src_port['port']['id'] }) as pp2: with self.port_pair_group(port_pair_group={ 'port_pairs': [ pp1['port_pair']['id'] ] }) as pg1, self.port_pair_group(port_pair_group={ 'port_pairs': [ pp2['port_pair']['id'] ] }) as pg2: self._test_create_port_chain({ 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'] ] }) def test_create_port_chain_with_empty_port_pair_groups(self): self._create_port_chain( self.fmt, {'port_pair_groups': []}, expected_res_status=400 ) def test_create_port_chain_with_nonuuid_port_pair_group_id(self): self._create_port_chain( self.fmt, {'port_pair_groups': ['unknown']}, expected_res_status=400 ) def test_create_port_chain_with_unknown_port_pair_group_id(self): self._create_port_chain( self.fmt, {'port_pair_groups': [uuidutils.generate_uuid()]}, expected_res_status=404 ) def test_create_port_chain_with_same_port_pair_groups(self): with self.port_pair_group( port_pair_group={} ) as pg: with self.port_chain( port_chain={ 'port_pair_groups': [pg['port_pair_group']['id']] } ): self._create_port_chain( self.fmt, { 'port_pair_groups': [pg['port_pair_group']['id']] }, expected_res_status=409 ) def test_create_port_chain_with_no_port_pair_groups(self): self._create_port_chain( self.fmt, {}, expected_res_status=400 ) def test_create_port_chain_with_consecutive_tap_port_pair_groups(self): with self.port( name='port1', device_id='tap_device1' ) as tap_port1, self.port( name='port2', device_id='tap_device2' ) as tap_port2: with self.port_pair( port_pair={ 'ingress': tap_port1['port']['id'], 'egress': tap_port1['port']['id'] } ) as tap_pp1, self.port_pair( port_pair={ 'ingress': 
tap_port2['port']['id'], 'egress': tap_port2['port']['id'] } ) as tap_pp2: with self.port_pair_group( self.fmt, { 'port_pairs': [tap_pp1['port_pair']['id']], 'tap_enabled': True } ) as pg1, self.port_pair_group( self.fmt, { 'port_pairs': [tap_pp2['port_pair']['id']], 'tap_enabled': True } ) as pg2: self._create_port_chain( self.fmt, { 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'] ] }, expected_res_status=400 ) def test_create_port_chain_with_non_consecutive_tap_port_pair_groups(self): with self.port( name='port1', device_id='tap_device1' ) as tap_port1, self.port( name='port2', device_id='default_device' ) as ingress_default, self.port( name='port3', device_id='default_device' ) as egress_default, self.port( name='port4', device_id='tap_device2' ) as tap_port2: with self.port_pair( port_pair={ 'ingress': tap_port1['port']['id'], 'egress': tap_port1['port']['id'] } ) as tap_pp1, self.port_pair( port_pair={ 'ingress': ingress_default['port']['id'], 'egress': egress_default['port']['id'] } ) as default_pp, self.port_pair( port_pair={ 'ingress': tap_port2['port']['id'], 'egress': tap_port2['port']['id'] } ) as tap_pp2: with self.port_pair_group( self.fmt, { 'port_pairs': [tap_pp1['port_pair']['id']], 'tap_enabled': True, 'port_pair_group_parameters': { 'lb_fields': [], 'ppg_n_tuple_mapping': {'ingress_n_tuple': {}, 'egress_n_tuple': {}}} } ) as tap_pg1, self.port_pair_group( self.fmt, { 'port_pairs': [default_pp['port_pair']['id']], 'tap_enabled': False, 'port_pair_group_parameters': { 'lb_fields': [], 'ppg_n_tuple_mapping': {'ingress_n_tuple': {}, 'egress_n_tuple': {}} } } ) as default_pg, self.port_pair_group( self.fmt, { 'port_pairs': [tap_pp2['port_pair']['id']], 'tap_enabled': True, 'port_pair_group_parameters': { 'lb_fields': [], 'ppg_n_tuple_mapping': {'ingress_n_tuple': {}, 'egress_n_tuple': {} } } } ) as tap_pg2: self._test_create_port_chain( { 'port_pair_groups': [ tap_pg1['port_pair_group']['id'], 
default_pg['port_pair_group']['id'], tap_pg2['port_pair_group']['id'] ] } ) def test_create_port_chain_with_invalid_chain_parameters(self): with self.port_pair_group(port_pair_group={}) as pg: self._create_port_chain( self.fmt, { 'chain_parameters': {'correlation': 'unknown'}, 'port_pair_groups': [pg['port_pair_group']['id']] }, expected_res_status=400 ) def test_create_port_chain_with_invalid_chain_parameters_symmetric(self): with self.port_pair_group(port_pair_group={}) as pg: self._create_port_chain( self.fmt, { 'chain_parameters': {'symmetric': 'abc'}, 'port_pair_groups': [pg['port_pair_group']['id']] }, expected_res_status=400 ) def test_create_port_chain_unknown_flow_classifiers(self): with self.port_pair_group(port_pair_group={}) as pg: self._create_port_chain( self.fmt, { 'flow_classifiers': [uuidutils.generate_uuid()], 'port_pair_groups': [pg['port_pair_group']['id']] }, expected_res_status=404 ) def test_create_port_chain_nouuid_flow_classifiers(self): with self.port_pair_group(port_pair_group={}) as pg: self._create_port_chain( self.fmt, { 'flow_classifiers': ['unknown'], 'port_pair_groups': [pg['port_pair_group']['id']] }, expected_res_status=400 ) def test_list_port_chains(self): with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain(port_chain={ 'port_pair_groups': [pg1['port_pair_group']['id']] }) as pc1, self.port_chain(port_chain={ 'port_pair_groups': [pg2['port_pair_group']['id']] }) as pc2: port_chains = [pc1, pc2] self._test_list_resources( 'port_chain', port_chains ) def test_list_port_chains_with_params(self): with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg1['port_pair_group']['id']] }) as pc1, self.port_chain(port_chain={ 'name': 'test2', 'port_pair_groups': [pg2['port_pair_group']['id']] }) as pc2: self._test_list_resources( 'port_chain', 
[pc1], query_params='name=test1' ) self._test_list_resources( 'port_chain', [pc2], query_params='name=test2' ) self._test_list_resources( 'port_chain', [], query_params='name=test3' ) def test_list_port_chains_with_unknown_params(self): with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain(port_chain={ 'name': 'test1', 'port_pair_groups': [pg1['port_pair_group']['id']] }) as pc1, self.port_chain(port_chain={ 'name': 'test2', 'port_pair_groups': [pg2['port_pair_group']['id']] }) as pc2: self._test_list_resources( 'port_chain', [pc1, pc2], query_params='hello=test3' ) def test_show_port_chain(self): with self.port_pair_group( port_pair_group={} ) as pg: with self.port_chain(port_chain={ 'name': 'test1', 'description': 'portchain', 'port_pair_groups': [pg['port_pair_group']['id']] }) as pc: req = self.new_show_request( 'port_chains', pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) expected = self._get_expected_port_chain(pc['port_chain']) self._assert_port_chain_equal(res['port_chain'], expected) def test_show_port_chain_noexist(self): req = self.new_show_request( 'port_chains', '1' ) res = req.get_response(self.ext_api) self.assertEqual(404, res.status_int) def test_update_port_chain_add_flow_classifiers(self): with self.port( name='test1' ) as port: with self.flow_classifier( flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port['port']['id'] } ) as fc1, self.flow_classifier( flow_classifier={ 'source_ip_prefix': '192.168.101.0/24', 'logical_source_port': port['port']['id'] } ) as fc2: with self.port_pair_group( port_pair_group={} ) as pg: with self.port_chain(port_chain={ 'name': 'test1', 'description': 'desc1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [fc1['flow_classifier']['id']] }) as pc: updates = { 'name': 'test2', 'description': 'desc2', 'flow_classifiers': [ 
fc1['flow_classifier']['id'], fc2['flow_classifier']['id'] ] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) expected = pc['port_chain'] expected.update(updates) self._assert_port_chain_equal( res['port_chain'], expected ) req = self.new_show_request( 'port_chains', pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self._assert_port_chain_equal( res['port_chain'], expected ) def test_update_port_chain_remove_flow_classifiers(self): with self.port( name='test1' ) as port: with self.flow_classifier( flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port['port']['id'] } ) as fc1, self.flow_classifier( flow_classifier={ 'source_ip_prefix': '192.168.101.0/24', 'logical_source_port': port['port']['id'] } ) as fc2: with self.port_pair_group( port_pair_group={} ) as pg: with self.port_chain(port_chain={ 'name': 'test1', 'description': 'desc1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [ fc1['flow_classifier']['id'], fc2['flow_classifier']['id'] ] }) as pc: updates = { 'name': 'test2', 'description': 'desc2', 'flow_classifiers': [ fc1['flow_classifier']['id'] ] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) expected = pc['port_chain'] expected.update(updates) self._assert_port_chain_equal( res['port_chain'], expected ) req = self.new_show_request( 'port_chains', pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self._assert_port_chain_equal( res['port_chain'], expected ) def test_update_port_chain_replace_flow_classifiers(self): with self.port( name='test1' ) as port: with self.flow_classifier( flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port['port']['id'] } ) as 
fc1, self.flow_classifier( flow_classifier={ 'source_ip_prefix': '192.168.101.0/24', 'logical_source_port': port['port']['id'] } ) as fc2: with self.port_pair_group( port_pair_group={} ) as pg: with self.port_chain(port_chain={ 'name': 'test1', 'description': 'desc1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [fc1['flow_classifier']['id']] }) as pc: updates = { 'name': 'test2', 'description': 'desc2', 'flow_classifiers': [fc2['flow_classifier']['id']] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) expected = pc['port_chain'] expected.update(updates) self._assert_port_chain_equal( res['port_chain'], expected ) req = self.new_show_request( 'port_chains', pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self._assert_port_chain_equal( res['port_chain'], expected ) def test_update_port_chain_flow_classifiers_basic_the_same(self): with self.port( name='test1' ) as port1, self.port( name='test2' ) as port2: with self.flow_classifier( flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port1['port']['id'] } ) as fc1, self.flow_classifier( flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port2['port']['id'] } ) as fc2: with self.port_pair_group( port_pair_group={} ) as pg: with self.port_chain(port_chain={ 'name': 'test1', 'description': 'desc1', 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [fc1['flow_classifier']['id']] }) as pc: updates = { 'name': 'test2', 'description': 'desc2', 'flow_classifiers': [fc2['flow_classifier']['id']] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) expected = pc['port_chain'] expected.update(updates) self._assert_port_chain_equal( res['port_chain'], 
expected ) req = self.new_show_request( 'port_chains', pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self._assert_port_chain_equal( res['port_chain'], expected ) def test_update_port_chain_conflict_flow_classifiers(self): with self.port( name='test1' ) as port1, self.port( name='test2' ) as port2: with self.flow_classifier( flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port1['port']['id'] } ) as fc1, self.flow_classifier( flow_classifier={ 'source_ip_prefix': '192.168.101.0/24', 'logical_source_port': port1['port']['id'] } ) as fc2, self.flow_classifier( flow_classifier={ 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port2['port']['id'] } ) as fc3: with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain(port_chain={ 'port_pair_groups': [pg1['port_pair_group']['id']], 'flow_classifiers': [fc1['flow_classifier']['id']] }), self.port_chain(port_chain={ 'name': 'test2', 'port_pair_groups': [pg2['port_pair_group']['id']], 'flow_classifiers': [fc2['flow_classifier']['id']] }) as pc2: updates = { 'flow_classifiers': [fc3['flow_classifier']['id']] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc2['port_chain']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(400, res.status_int) def test_update_port_chain_add_port_pair_groups(self): with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain(port_chain={ 'port_pair_groups': [pg1['port_pair_group']['id']], }) as pc: updates = { 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'] ] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) expected = pc['port_chain'] expected.update(updates) 
self._assert_port_chain_equal(res['port_chain'], expected) req = self.new_show_request( 'port_chains', pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self._assert_port_chain_equal(res['port_chain'], expected) def test_update_port_chain_remove_port_pair_groups(self): with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain(port_chain={ 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'], ], }) as pc: updates = { 'port_pair_groups': [ pg1['port_pair_group']['id'] ] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) expected = pc['port_chain'] expected.update(updates) self._assert_port_chain_equal(res['port_chain'], expected) req = self.new_show_request( 'port_chains', pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self._assert_port_chain_equal(res['port_chain'], expected) def test_update_port_chain_replace_port_pair_groups(self): with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain(port_chain={ 'port_pair_groups': [pg1['port_pair_group']['id']], }) as pc: updates = { 'port_pair_groups': [pg2['port_pair_group']['id']] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) expected = pc['port_chain'] expected.update(updates) self._assert_port_chain_equal(res['port_chain'], expected) req = self.new_show_request( 'port_chains', pc['port_chain']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self._assert_port_chain_equal(res['port_chain'], expected) def test_update_port_chain_chain_parameters(self): with self.port_pair_group( port_pair_group={} ) as pg: with 
self.port_chain(port_chain={ 'port_pair_groups': [pg['port_pair_group']['id']], }) as pc: updates = { 'chain_parameters': {'correlation': 'mpls'} } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(400, res.status_int) def test_update_port_chain_part_of_graph_fail(self): with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain(port_chain={ 'port_pair_groups': [pg1['port_pair_group']['id']] }) as pc1, self.port_chain(port_chain={ 'port_pair_groups': [pg2['port_pair_group']['id']] }) as pc2: with self.service_graph(service_graph={ 'name': 'test1', 'port_chains': { pc1['port_chain']['id']: [pc2['port_chain']['id']]} }): updates = { 'port_pair_groups': [uuidutils.generate_uuid()] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc1['port_chain']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(409, res.status_int) updates = { 'flow_classifiers': [uuidutils.generate_uuid()] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc2['port_chain']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(409, res.status_int) updates = { 'name': 'new name', 'description': 'new description' } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc1['port_chain']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(200, res.status_int) def test_update_port_chain_consistency_with_consecutive_tap_ppg(self): with self.port( name='port1', device_id='tap_device1' ) as tap_port1, self.port( name='port2', device_id='tap_device2' ) as tap_port2: with self.port_pair( port_pair={ 'ingress': tap_port1['port']['id'], 'egress': tap_port1['port']['id'] } ) as tap_pp1, self.port_pair( port_pair={ 'ingress': tap_port2['port']['id'], 'egress': tap_port2['port']['id'] } ) as tap_pp2: with self.port_pair_group( self.fmt, { 'port_pairs': 
[tap_pp1['port_pair']['id']], 'tap_enabled': True } ) as pg1, self.port_pair_group( self.fmt, { 'port_pairs': [tap_pp2['port_pair']['id']], 'tap_enabled': True } ) as pg2: with self.port_chain( port_chain={ 'port_pair_groups': [ pg1['port_pair_group']['id'] ] } ) as pc: updates = { 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'] ] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(400, res.status_int) def test_update_tap_port_chain_consistency(self): with self.port( name='port1', device_id='tap_device1' ) as tap_port1, self.port( name='port2', device_id='tap_device2' ) as tap_port2: with self.port_pair( port_pair={ 'ingress': tap_port1['port']['id'], 'egress': tap_port1['port']['id'] } ) as tap_pp1, self.port_pair( port_pair={ 'ingress': tap_port2['port']['id'], 'egress': tap_port2['port']['id'] } ) as tap_pp2: with self.port_pair_group( self.fmt, { 'port_pairs': [tap_pp1['port_pair']['id']], 'tap_enabled': True } ) as pg1, self.port_pair_group( self.fmt, { 'port_pairs': [tap_pp2['port_pair']['id']], 'tap_enabled': False } ) as pg2: with self.port_chain( port_chain={ 'port_pair_groups': [ pg1['port_pair_group']['id'] ] } ) as pc: updates = { 'port_pair_groups': [ pg1['port_pair_group']['id'], pg2['port_pair_group']['id'] ] } req = self.new_update_request( 'port_chains', {'port_chain': updates}, pc['port_chain']['id'] ) resp = req.get_response(self.ext_api) self.assertEqual(200, resp.status_int) res = self.deserialize(self.fmt, resp) expected = pc['port_chain'] expected.update(updates) self._assert_port_chain_equal(res['port_chain'], expected) def test_delete_port_chain(self): with self.port_pair_group( port_pair_group={} ) as pg: with self.port_chain(port_chain={ 'port_pair_groups': [pg['port_pair_group']['id']] }, do_delete=False) as pc: req = self.new_delete_request( 'port_chains', pc['port_chain']['id'] ) res = 
req.get_response(self.ext_api) self.assertEqual(204, res.status_int) req = self.new_show_request( 'port_chains', pc['port_chain']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(404, res.status_int) req = self.new_show_request( 'port_pair_groups', pg['port_pair_group']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(200, res.status_int) def test_delete_port_chain_noexist(self): req = self.new_delete_request( 'port_chains', '1' ) res = req.get_response(self.ext_api) self.assertEqual(404, res.status_int) def test_delete_port_chain_part_of_graph_fail(self): with self.port_pair_group( port_pair_group={} ) as pg1, self.port_pair_group( port_pair_group={} ) as pg2: with self.port_chain(port_chain={ 'port_pair_groups': [pg1['port_pair_group']['id']] }) as pc1, self.port_chain(port_chain={ 'port_pair_groups': [pg2['port_pair_group']['id']] }) as pc2: with self.service_graph(service_graph={ 'name': 'test1', 'port_chains': { pc1['port_chain']['id']: [pc2['port_chain']['id']]} }): req = self.new_delete_request( 'port_chains', pc1['port_chain']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(409, res.status_int) req = self.new_delete_request( 'port_chains', pc2['port_chain']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(409, res.status_int) def test_delete_flow_classifier_port_chain_exist(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'logical_source_port': port['port']['id'] }) as fc: with self.port_pair_group(port_pair_group={ }) as pg: with self.port_chain(port_chain={ 'port_pair_groups': [pg['port_pair_group']['id']], 'flow_classifiers': [fc['flow_classifier']['id']] }): req = self.new_delete_request( 'flow_classifiers', fc['flow_classifier']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(409, res.status_int) def test_create_port_pair_group(self): self._test_create_port_pair_group({}) def test_quota_create_port_pair_group_quota(self): 
cfg.CONF.set_override('quota_port_pair_group', 3, group='QUOTAS') self._create_port_pair_group( self.fmt, {'port_pairs': []}, expected_res_status=201 ) self._create_port_pair_group( self.fmt, {'port_pairs': []}, expected_res_status=201 ) self._create_port_pair_group( self.fmt, {'port_pairs': []}, expected_res_status=201 ) self._create_port_pair_group( self.fmt, {'port_pairs': []}, expected_res_status=409 ) def test_create_port_pair_group_all_fields(self): self._test_create_port_pair_group({ 'name': 'test1', 'description': 'desc1', 'port_pairs': [], 'tap_enabled': False, 'port_pair_group_parameters': { 'lb_fields': ['ip_src', 'ip_dst'], 'ppg_n_tuple_mapping': { 'ingress_n_tuple': {'source_ip_prefix': None}, 'egress_n_tuple': {'destination_ip_prefix': None}} } }) def test_create_port_pair_group_with_empty_parameters(self): self._test_create_port_pair_group({ 'name': 'test1', 'description': 'desc1', 'port_pairs': [], 'port_pair_group_parameters': {} }) def test_create_port_pair_group_with_none_parameters(self): self._test_create_port_pair_group({ 'name': 'test1', 'description': 'desc1', 'port_pairs': [], 'port_pair_group_parameters': None }) def test_create_port_pair_group_with_default_parameters(self): self._test_create_port_pair_group({ 'name': 'test1', 'description': 'desc1', 'port_pairs': [], 'tap_enabled': False, 'port_pair_group_parameters': { 'lb_fields': [], 'ppg_n_tuple_mapping': {} } }) def test_create_port_pair_group_with_tap_enabled_parameter_true(self): self._test_create_port_pair_group( { 'name': 'test1', 'description': 'desc1', 'port_pairs': [], 'tap_enabled': True, 'port_pair_group_parameters': {} }, expected_port_pair_group={ 'name': 'test1', 'description': 'desc1', 'port_pairs': [], 'tap_enabled': True, 'port_pair_group_parameters': { 'lb_fields': [], 'ppg_n_tuple_mapping': {u'egress_n_tuple': {}, u'ingress_n_tuple': {}}, } } ) def test_create_ppg_with_all_params_and_tap_enabled_parameter_true(self): self._create_port_pair_group( self.fmt, { 'name': 
'test1', 'description': 'desc1', 'port_pairs': [], 'tap_enabled': True, 'port_pair_group_parameters': { 'lb_fields': ['ip_src', 'ip_dst'], 'ppg_n_tuple_mapping': { 'ingress_n_tuple': {'source_ip_prefix': None}, 'egress_n_tuple': {'destination_ip_prefix': None}} } }) def test_create_port_pair_group_with_port_pairs(self): with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: with self.port_pair(port_pair={ 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': dst_port['port']['id'], 'egress': src_port['port']['id'] }) as pp2: self._test_create_port_pair_group({ 'port_pairs': [ pp1['port_pair']['id'], pp2['port_pair']['id'] ] }) def test_create_tap_port_pair_group_with_single_port_pair(self): with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: with self.port_pair(port_pair={ 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }) as pp1: self._test_create_port_pair_group( { 'port_pairs': [ pp1['port_pair']['id'], ], 'tap_enabled': True } ) def test_create_tap_pair_group_with_multiple_port_pairs(self): with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: with self.port_pair(port_pair={ 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': dst_port['port']['id'], 'egress': src_port['port']['id'] }) as pp2: self._create_port_pair_group( self.fmt, { 'port_pairs': [ pp1['port_pair']['id'], pp2['port_pair']['id'] ], 'tap_enabled': True }, expected_res_status=400 ) def test_create_port_pair_group_consistent_correlations(self): with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: with self.port_pair(port_pair={ 'ingress': src_port['port']['id'], 'egress': 
                dst_port['port']['id'],
                'service_function_parameters': {'correlation': 'mpls'}
            }) as pp1, self.port_pair(port_pair={
                'ingress': dst_port['port']['id'],
                'egress': src_port['port']['id'],
                'service_function_parameters': {'correlation': 'mpls'}
            }) as pp2:
                # Both port pairs share the 'mpls' correlation, so grouping
                # them together must succeed.
                self._test_create_port_pair_group({
                    'port_pairs': [
                        pp1['port_pair']['id'],
                        pp2['port_pair']['id']
                    ]
                })

    def test_create_port_pair_group_inconsistent_correlations(self):
        """Mixing correlation types ('mpls' vs None) in one group is a 400."""
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id'],
                'service_function_parameters': {'correlation': 'mpls'}
            }) as pp1, self.port_pair(port_pair={
                'ingress': dst_port['port']['id'],
                'egress': src_port['port']['id'],
                'service_function_parameters': {'correlation': None}
            }) as pp2:
                self._create_port_pair_group(
                    self.fmt,
                    {'port_pairs': [
                        pp1['port_pair']['id'],
                        pp2['port_pair']['id']
                    ]},
                    expected_res_status=400)

    def test_create_port_pair_group_with_nouuid_port_pair_id(self):
        """A non-UUID port pair id is rejected as a bad request (400)."""
        self._create_port_pair_group(
            self.fmt, {'port_pairs': ['unknown']},
            expected_res_status=400
        )

    def test_create_port_pair_group_with_unknown_port_pair_id(self):
        """A well-formed but nonexistent port pair id yields 404."""
        self._create_port_pair_group(
            self.fmt, {'port_pairs': [uuidutils.generate_uuid()]},
            expected_res_status=404
        )

    def test_create_port_pair_group_share_port_pair_id(self):
        """A port pair already used by one group cannot join another (409)."""
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }) as pp:
                with self.port_pair_group(port_pair_group={
                    'port_pairs': [pp['port_pair']['id']]
                }):
                    # Second group referencing the same port pair conflicts.
                    self._create_port_pair_group(
                        self.fmt,
                        {'port_pairs': [pp['port_pair']['id']]},
                        expected_res_status=409
                    )

    def test_list_port_pair_groups(self):
        """Listing returns every created port pair group."""
        with self.port_pair_group(port_pair_group={
            'name': 'test1'
        }) as pc1, self.port_pair_group(port_pair_group={
            'name': 'test2'
        }) as
pc2: port_pair_groups = [pc1, pc2] self._test_list_resources( 'port_pair_group', port_pair_groups ) def test_list_port_pair_groups_with_params(self): with self.port_pair_group(port_pair_group={ 'name': 'test1' }) as pc1, self.port_pair_group(port_pair_group={ 'name': 'test2' }) as pc2: self._test_list_resources( 'port_pair_group', [pc1], query_params='name=test1' ) self._test_list_resources( 'port_pair_group', [pc2], query_params='name=test2' ) self._test_list_resources( 'port_pair_group', [], query_params='name=test3' ) def test_list_port_pair_groups_with_unknown_params(self): with self.port_pair_group(port_pair_group={ 'name': 'test1' }) as pc1, self.port_pair_group(port_pair_group={ 'name': 'test2' }) as pc2: self._test_list_resources( 'port_pair_group', [pc1, pc2], query_params='hello=test3' ) def test_show_port_pair_group(self): with self.port_pair_group(port_pair_group={ 'name': 'test1' }) as pc: req = self.new_show_request( 'port_pair_groups', pc['port_pair_group']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) for k, v in pc['port_pair_group'].items(): self.assertEqual(res['port_pair_group'][k], v) def test_show_port_pair_group_noexist(self): req = self.new_show_request( 'port_pair_groups', '1' ) res = req.get_response(self.ext_api) self.assertEqual(404, res.status_int) def test_update_port_pair_group(self): with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: with self.port_pair(port_pair={ 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': dst_port['port']['id'], 'egress': src_port['port']['id'] }) as pp2: with self.port_pair_group(port_pair_group={ 'name': 'test1', 'description': 'desc1', 'port_pairs': [pp1['port_pair']['id']] }) as pg: updates = { 'name': 'test2', 'description': 'desc2', 'port_pairs': [pp2['port_pair']['id']] } req = self.new_update_request( 'port_pair_groups', 
{'port_pair_group': updates}, pg['port_pair_group']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) expected = pg['port_pair_group'] expected.update(updates) for k, v in expected.items(): self.assertEqual(res['port_pair_group'][k], v) req = self.new_show_request( 'port_pair_groups', pg['port_pair_group']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) for k, v in expected.items(): self.assertEqual(res['port_pair_group'][k], v) def test_update_port_pair_group_consistency_checks(self): with self.port( name='port1', device_id='default' ) as port1, self.port( name='port2', device_id='default' ) as port2, self.port( name='port3', device_id='default' ) as port3, self.port( name='port4', device_id='default' ) as port4: with self.port_pair(port_pair={ 'ingress': port1['port']['id'], 'egress': port2['port']['id'], 'service_function_parameters': {'correlation': 'mpls'} }) as pp1, self.port_pair(port_pair={ 'ingress': port2['port']['id'], 'egress': port3['port']['id'], 'service_function_parameters': {'correlation': 'mpls'} }) as pp2, self.port_pair(port_pair={ 'ingress': port3['port']['id'], 'egress': port4['port']['id'], 'service_function_parameters': {'correlation': None} }) as pp3, self.port_pair(port_pair={ 'ingress': port4['port']['id'], 'egress': port1['port']['id'], 'service_function_parameters': {'correlation': 'mpls'} }) as pp4: with self.port_pair_group(port_pair_group={ 'name': 'test1', 'description': 'desc1', 'port_pairs': [pp1['port_pair']['id'], pp2['port_pair']['id']] }) as pg: updates = { 'name': 'test2', 'description': 'desc2', 'port_pairs': [pp1['port_pair']['id'], pp2['port_pair']['id'], pp3['port_pair']['id']] } req = self.new_update_request( 'port_pair_groups', {'port_pair_group': updates}, pg['port_pair_group']['id'] ) resp = req.get_response(self.ext_api) self.assertEqual(400, resp.status_int) updates = { 'name': 'test3', 'description': 'desc3', 'port_pairs': [pp1['port_pair']['id'], 
pp2['port_pair']['id'], pp4['port_pair']['id']] } req = self.new_update_request( 'port_pair_groups', {'port_pair_group': updates}, pg['port_pair_group']['id'] ) resp = req.get_response(self.ext_api) res = self.deserialize(self.fmt, resp) expected = pg['port_pair_group'] expected.update(updates) for k, v in expected.items(): self.assertEqual(res['port_pair_group'][k], v) req = self.new_show_request( 'port_pair_groups', pg['port_pair_group']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) for k, v in expected.items(): self.assertEqual(res['port_pair_group'][k], v) def test_update_tap_port_pair_group_consistency(self): with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: with self.port_pair(port_pair={ 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }) as pp1, self.port_pair(port_pair={ 'ingress': dst_port['port']['id'], 'egress': src_port['port']['id'] }) as pp2: with self.port_pair_group(port_pair_group={ 'name': 'test1', 'description': 'desc1', 'port_pairs': [pp1['port_pair']['id']], }) as pg: updates = { 'name': 'test2', 'description': 'desc2', 'port_pairs': [pp1['port_pair']['id'], pp2['port_pair']['id']], 'tap_enabled': True } req = self.new_update_request( 'port_pair_groups', {'port_pair_group': updates}, pg['port_pair_group']['id'] ) resp = req.get_response(self.ext_api) self.assertEqual(400, resp.status_int) def test_delete_port_pair_group(self): with self.port_pair_group(port_pair_group={ 'name': 'test1' }, do_delete=False) as pc: req = self.new_delete_request( 'port_pair_groups', pc['port_pair_group']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(204, res.status_int) req = self.new_show_request( 'port_pair_groups', pc['port_pair_group']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(404, res.status_int) def test_delete_port_pair_group_port_chain_exist(self): with self.port_pair_group(port_pair_group={ 'name': 
'test1' }) as pg: with self.port_chain(port_chain={ 'port_pair_groups': [pg['port_pair_group']['id']] }): req = self.new_delete_request( 'port_pair_groups', pg['port_pair_group']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(409, res.status_int) def test_delete_port_pair_group_noexist(self): req = self.new_delete_request( 'port_pair_groups', '1' ) res = req.get_response(self.ext_api) self.assertEqual(404, res.status_int) def test_create_port_pair(self): with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as dst_port: self._test_create_port_pair({ 'ingress': src_port['port']['id'], 'egress': dst_port['port']['id'] }) def test_quota_create_port_pair_quota(self): cfg.CONF.set_override('quota_port_pair', 3, group='QUOTAS') with self.port( name='port1', device_id='default' ) as src_port1, self.port( name='port2', device_id='default' ) as dst_port1, self.port( name='port3', device_id='default' ) as src_port2, self.port( name='port4', device_id='default' ) as dst_port2, self.port( name='port5', device_id='default' ) as src_port3, self.port( name='port6', device_id='default' ) as dst_port3, self.port( name='port7', device_id='default' ) as src_port4, self.port( name='port8', device_id='default' ) as dst_port4: self._create_port_pair( self.fmt, { 'ingress': src_port1['port']['id'], 'egress': dst_port1['port']['id'] }, expected_res_status=201) self._create_port_pair( self.fmt, { 'ingress': src_port2['port']['id'], 'egress': dst_port2['port']['id'] }, expected_res_status=201) self._create_port_pair( self.fmt, { 'ingress': src_port3['port']['id'], 'egress': dst_port3['port']['id'] }, expected_res_status=201) self._create_port_pair( self.fmt, { 'ingress': src_port4['port']['id'], 'egress': dst_port4['port']['id'] }, expected_res_status=409) def test_create_port_pair_all_fields(self): with self.port( name='port1', device_id='default' ) as src_port, self.port( name='port2', device_id='default' ) as 
        dst_port:
            self._test_create_port_pair({
                'name': 'test1',
                'description': 'desc1',
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id'],
                'service_function_parameters': {
                    'correlation': None,
                    'weight': 2}
            })

    def test_create_port_pair_none_service_function_parameters(self):
        """service_function_parameters=None is accepted on create."""
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            self._test_create_port_pair({
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id'],
                'service_function_parameters': None
            })

    def test_create_port_pair_empty_service_function_parameters(self):
        """An empty service_function_parameters dict is accepted on create."""
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            self._test_create_port_pair({
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id'],
                'service_function_parameters': {}
            })

    def test_create_port_pair_with_src_dst_same_port(self):
        """A single port may serve as both ingress and egress of a pair."""
        with self.port(
            name='port1',
            device_id='default'
        ) as src_dst_port:
            self._test_create_port_pair({
                'ingress': src_dst_port['port']['id'],
                'egress': src_dst_port['port']['id']
            })

    def test_create_port_pair_empty_input(self):
        """Creating a port pair with no fields at all is a 400."""
        self._create_port_pair(self.fmt, {}, expected_res_status=400)

    def test_create_port_pair_with_no_ingress(self):
        """ingress is mandatory; omitting it is a 400."""
        with self.port(
            name='port1',
            device_id='default'
        ) as dst_port:
            self._create_port_pair(
                self.fmt,
                {
                    'egress': dst_port['port']['id']
                },
                expected_res_status=400
            )

    def test_create_port_pair_with_no_egress(self):
        """egress is mandatory; omitting it is a 400."""
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port:
            self._create_port_pair(
                self.fmt,
                {
                    'ingress': src_port['port']['id']
                },
                expected_res_status=400
            )

    def test_create_port_pair_with_nouuid_ingress(self):
        """A non-UUID ingress id is rejected with 400."""
        with self.port(
            name='port1',
            device_id='default'
        ) as dst_port:
            self._create_port_pair(
                self.fmt,
                {
                    'ingress': '1',
                    'egress': dst_port['port']['id']
                },
                expected_res_status=400
            )

    def test_create_port_pair_with_unknown_ingress(self):
        with self.port(
            name='port1',
            device_id='default'
        ) as dst_port:
# NOTE(review): formatting below reconstructed from a whitespace-mangled
# source dump; tokens are unchanged, only line structure and comments added.
            self._create_port_pair(
                self.fmt,
                {
                    'ingress': uuidutils.generate_uuid(),
                    'egress': dst_port['port']['id']
                },
                expected_res_status=404
            )

    def test_create_port_pair_with_nouuid_egress(self):
        # An egress value that is not a UUID is rejected with 400.
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port:
            self._create_port_pair(
                self.fmt,
                {
                    'ingress': src_port['port']['id'],
                    'egress': '1'
                },
                expected_res_status=400
            )

    def test_create_port_pair_with_unknown_egress(self):
        # A well-formed but unknown egress port UUID yields 404.
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port:
            self._create_port_pair(
                self.fmt,
                {
                    'ingress': src_port['port']['id'],
                    'egress': uuidutils.generate_uuid()
                },
                expected_res_status=404
            )

    def test_create_port_pair_ingress_egress_different_hosts(self):
        # Ingress and egress ports bound to different devices are invalid.
        with self.port(
            name='port1',
            device_id='device1'
        ) as src_port, self.port(
            name='port2',
            device_id='device2'
        ) as dst_port:
            self._create_port_pair(
                self.fmt,
                {
                    'ingress': src_port['port']['id'],
                    'egress': dst_port['port']['id']
                },
                expected_res_status=400
            )

    def test_create_port_pair_with_invalid_service_function_parameters(self):
        # Unknown keys in service_function_parameters are rejected.
        with self.port(
            name='port1',
            device_id='default'
        ) as src_dst_port:
            self._create_port_pair(
                self.fmt,
                {
                    'ingress': src_dst_port['port']['id'],
                    'egress': src_dst_port['port']['id'],
                    'service_function_parameters': {'abc': 'def'}
                },
                expected_res_status=400
            )

    def test_create_port_pair_with_invalid_correlation(self):
        with self.port(
            name='port1',
            device_id='default'
        ) as src_dst_port:
            self._create_port_pair(
                self.fmt,
                {
                    'ingress': src_dst_port['port']['id'],
                    'egress': src_dst_port['port']['id'],
                    'service_function_parameters': {'correlation': 'def'}
                },
                expected_res_status=400
            )

    def test_create_port_pair_with_invalid_weight(self):
        # Negative weights are rejected.
        with self.port(
            name='port1',
            device_id='default'
        ) as src_dst_port:
            self._create_port_pair(
                self.fmt,
                {
                    'ingress': src_dst_port['port']['id'],
                    'egress': src_dst_port['port']['id'],
                    'service_function_parameters': {'weight': -1}
                },
                expected_res_status=400
            )

    def test_list_port_pairs(self):
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }) as pc1, self.port_pair(port_pair={
                'ingress': dst_port['port']['id'],
                'egress': src_port['port']['id']
            }) as pc2:
                port_pairs = [pc1, pc2]
                self._test_list_resources(
                    'port_pair', port_pairs
                )

    def test_list_port_pairs_with_params(self):
        # Listing honors the 'name' query filter.
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'name': 'test1',
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }) as pc1, self.port_pair(port_pair={
                'name': 'test2',
                'ingress': dst_port['port']['id'],
                'egress': src_port['port']['id']
            }) as pc2:
                self._test_list_resources(
                    'port_pair', [pc1],
                    query_params='name=test1'
                )
                self._test_list_resources(
                    'port_pair', [pc2],
                    query_params='name=test2'
                )
                self._test_list_resources(
                    'port_pair', [],
                    query_params='name=test3'
                )

    def test_list_port_pairs_with_unknown_params(self):
        # Unknown query parameters are ignored by list filtering.
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'name': 'test1',
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }) as pc1, self.port_pair(port_pair={
                'name': 'test2',
                'ingress': dst_port['port']['id'],
                'egress': src_port['port']['id']
            }) as pc2:
                port_pairs = [pc1, pc2]
                self._test_list_resources(
                    'port_pair', port_pairs,
                    query_params='hello=test3'
                )

    def test_show_port_pair(self):
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }) as pc:
                req = self.new_show_request(
                    'port_pairs', pc['port_pair']['id']
                )
                res = self.deserialize(
                    self.fmt, req.get_response(self.ext_api)
                )
                for k, v in pc['port_pair'].items():
                    self.assertEqual(res['port_pair'][k], v)

    def test_show_port_pair_noexist(self):
        req = self.new_show_request(
            'port_pairs', '1'
        )
        res = req.get_response(self.ext_api)
        self.assertEqual(404, res.status_int)

    def test_update_port_pair(self):
        # name/description are mutable; verify both the update response
        # and a subsequent show reflect the new values.
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'name': 'test1',
                'description': 'desc1',
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }) as pc:
                updates = {
                    'name': 'test2',
                    'description': 'desc2'
                }
                req = self.new_update_request(
                    'port_pairs', {'port_pair': updates},
                    pc['port_pair']['id']
                )
                res = self.deserialize(
                    self.fmt, req.get_response(self.ext_api)
                )
                expected = pc['port_pair']
                expected.update(updates)
                for k, v in expected.items():
                    self.assertEqual(res['port_pair'][k], v)
                req = self.new_show_request(
                    'port_pairs', pc['port_pair']['id']
                )
                res = self.deserialize(
                    self.fmt, req.get_response(self.ext_api)
                )
                for k, v in expected.items():
                    self.assertEqual(res['port_pair'][k], v)

    def test_update_port_pair_service_function_parameters(self):
        # service_function_parameters is create-only; updating it yields 400.
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'name': 'test1',
                'description': 'desc1',
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }) as pc:
                updates = {
                    'service_function_parameters': {
                        'correlation': None,
                        'weight': 2,
                    }
                }
                req = self.new_update_request(
                    'port_pairs', {'port_pair': updates},
                    pc['port_pair']['id']
                )
                res = req.get_response(self.ext_api)
                self.assertEqual(400, res.status_int)

    def test_update_port_pair_ingress(self):
        # ingress is create-only; updating it yields 400.
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'name': 'test1',
                'description': 'desc1',
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }) as pc:
                updates = {
                    'ingress': dst_port['port']['id']
                }
                req = self.new_update_request(
                    'port_pairs', {'port_pair': updates},
                    pc['port_pair']['id']
                )
                res = req.get_response(self.ext_api)
                self.assertEqual(400, res.status_int)

    def test_update_port_pair_egress(self):
        # egress is create-only; updating it yields 400.
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'name': 'test1',
                'description': 'desc1',
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }) as pc:
                updates = {
                    'egress': src_port['port']['id']
                }
                req = self.new_update_request(
                    'port_pairs', {'port_pair': updates},
                    pc['port_pair']['id']
                )
                res = req.get_response(self.ext_api)
                self.assertEqual(400, res.status_int)

    def test_delete_port_pair(self):
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }, do_delete=False) as pc:
                req = self.new_delete_request(
                    'port_pairs', pc['port_pair']['id']
                )
                res = req.get_response(self.ext_api)
                self.assertEqual(204, res.status_int)
                req = self.new_show_request(
                    'port_pairs', pc['port_pair']['id']
                )
                res = req.get_response(self.ext_api)
                self.assertEqual(404, res.status_int)

    def test_delete_port_pair_noexist(self):
        req = self.new_delete_request(
            'port_pairs', '1'
        )
        res = req.get_response(self.ext_api)
        self.assertEqual(404, res.status_int)

    def test_delete_port_pair_port_pair_group_exist(self):
        # A port pair still referenced by a port pair group is in use (409).
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }) as pp:
                with self.port_pair_group(port_pair_group={
                    'port_pairs': [pp['port_pair']['id']]
                }):
                    req = self.new_delete_request(
                        'port_pairs', pp['port_pair']['id']
                    )
                    res = req.get_response(self.ext_api)
                    self.assertEqual(409, res.status_int)

    def test_delete_ingress_port_pair_exist(self):
        # Deleting a Neutron port used as a port-pair ingress fails (500).
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }):
                req = self.new_delete_request(
                    'ports', src_port['port']['id']
                )
                res = req.get_response(self.api)
                self.assertEqual(500, res.status_int)

    def test_delete_egress_port_pair_exist(self):
        # Deleting a Neutron port used as a port-pair egress fails (500).
        with self.port(
            name='port1',
            device_id='default'
        ) as src_port, self.port(
            name='port2',
            device_id='default'
        ) as dst_port:
            with self.port_pair(port_pair={
                'ingress': src_port['port']['id'],
                'egress': dst_port['port']['id']
            }):
                req = self.new_delete_request(
                    'ports', dst_port['port']['id']
                )
                res = req.get_response(self.api)
                self.assertEqual(500, res.status_int)

    def _test_create_service_graph_branching_ppg(
            self, src_corr, dst_corr, status):
        # Helper: build two 2-PPG chains whose boundary PPGs carry the given
        # correlations, link them in a graph, and expect `status`.
        with self.port(
            name='port1',
            device_id='default'
        ) as port1, self.port(
            name='port2',
            device_id='default'
        ) as port2, self.port(
            name='port3',
            device_id='default'
        ) as port3, self.port(
            name='port4',
            device_id='default'
        ) as port4:
            with self.port_pair(port_pair={
                'ingress': port1['port']['id'],
                'egress': port1['port']['id'],
                'service_function_parameters': {'correlation': 'mpls'}
            }, do_delete=False) as pp1, self.port_pair(port_pair={
                'ingress': port2['port']['id'],
                'egress': port2['port']['id'],
                'service_function_parameters': {'correlation': src_corr}
            }, do_delete=False) as pp2, self.port_pair(port_pair={
                'ingress': port3['port']['id'],
                'egress': port3['port']['id'],
                'service_function_parameters': {'correlation': dst_corr}
            }, do_delete=False) as pp3, self.port_pair(port_pair={
                'ingress': port4['port']['id'],
                'egress': port4['port']['id'],
                'service_function_parameters': {'correlation': 'mpls'}
            }, do_delete=False) as pp4:
                with self.port_pair_group(
                    port_pair_group={'port_pairs': [pp1['port_pair']['id']]},
                    do_delete=False
                ) as pg1, self.port_pair_group(
                    port_pair_group={'port_pairs': [pp2['port_pair']['id']]},
                    do_delete=False
                ) as pg2, self.port_pair_group(
                    port_pair_group={'port_pairs': [pp3['port_pair']['id']]},
                    do_delete=False
                ) as pg3, self.port_pair_group(
                    port_pair_group={'port_pairs': [pp4['port_pair']['id']]},
                    do_delete=False
                ) as pg4:
                    with self.port_chain(
                        do_delete=False,
                        port_chain={'port_pair_groups': [
                            pg1['port_pair_group']['id'],
                            pg2['port_pair_group']['id']]}
                    ) as pc1, self.port_chain(
                        do_delete=False,
                        port_chain={'port_pair_groups': [
                            pg3['port_pair_group']['id'],
                            pg4['port_pair_group']['id']]}
                    ) as pc2:
                        self._create_service_graph(self.fmt, {
                            'port_chains': {
                                pc1['port_chain']['id']: [
                                    pc2['port_chain']['id']]
                            },
                            'name': 'abc',
                            'description': 'def'
                        }, expected_res_status=status)

    def test_create_service_graph_branching_ppg_no_src_corr_fail(self):
        self._test_create_service_graph_branching_ppg(None, 'mpls', 400)

    def test_create_service_graph_branching_ppg_no_dst_corr_fail(self):
        self._test_create_service_graph_branching_ppg('mpls', None, 400)

    def test_create_service_graph_branching_ppg_both_corrs_ok(self):
        self._test_create_service_graph_branching_ppg('mpls', 'mpls', 201)

    def test_create_service_graph_linear_dependency_only(self):
        # this test will create a graph consisting of 1 port chain being
        # dependent on 1 other port chain, thus with no branching.
        with self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg1, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg2:
            with self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
            ) as pc1, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
            ) as pc2:
                self._create_service_graph(self.fmt, {
                    'port_chains': {
                        pc1['port_chain']['id']: [pc2['port_chain']['id']]
                    },
                    'name': 'abc',
                    'description': 'def'
                }, expected_res_status=201)

    def test_create_service_graph_branching_no_class(self):
        # this test will create a graph where 1 port chain will act
        # as a dependency to 2 other port chains, effectively
        # creating a branching service function chain.
        with self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg1, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg2, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg3:
            with self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
            ) as pc1, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
            ) as pc2, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
            ) as pc3:
                self._create_service_graph(self.fmt, {
                    'port_chains': {
                        pc1['port_chain']['id']: [
                            pc2['port_chain']['id'],
                            pc3['port_chain']['id']
                        ]
                    },
                    'name': 'abc',
                    'description': 'def'
                }, expected_res_status=201)

    def test_create_service_graph_same_chain_fail(self):
        # this test will attempt to create a graph with a single branching
        # point having 2 port chains - which are actually the same port chain.
        with self.port_pair_group(
            port_pair_group={}
        ) as pg1, self.port_pair_group(
            port_pair_group={}
        ) as pg2:
            with self.port_chain(
                port_chain={
                    'port_pair_groups': [pg1['port_pair_group']['id']]
                }
            ) as pc1, self.port_chain(
                port_chain={
                    'port_pair_groups': [pg2['port_pair_group']['id']]
                }
            ) as pc2:
                self._create_service_graph(self.fmt, {
                    'port_chains': {
                        pc1['port_chain']['id']: [
                            pc2['port_chain']['id'],
                            pc2['port_chain']['id']
                        ]
                    },
                    'name': 'abc',
                    'description': 'def'
                }, expected_res_status=400)

    def test_create_service_graph_with_already_used_pcs_fail(self):
        # this test will attempt to create a graph that maps
        # port-chains which have already been mapped to other graphs.
        with self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg1, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg2, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg3:
            with self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
            ) as pc1, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
            ) as pc2, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
            ) as pc3:
                self._create_service_graph(self.fmt, {
                    'port_chains': {
                        pc1['port_chain']['id']: [
                            pc2['port_chain']['id']
                        ]
                    },
                    'name': 'abc',
                    'description': 'def'
                }, expected_res_status=201)
                self._create_service_graph(self.fmt, {
                    'port_chains': {
                        pc3['port_chain']['id']: [
                            pc1['port_chain']['id']
                        ]
                    },
                    'name': 'abc',
                    'description': 'def'
                }, expected_res_status=409)

    def test_create_service_graph_with_multiple_starts(self):
        # this test will create a graph with multiple starting chains (tails)
        with self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg1, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg2, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg3, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg4:
            with self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
            ) as pc1, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
            ) as pc2, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
            ) as pc3, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg4['port_pair_group']['id']]}
            ) as pc4:
                self._create_service_graph(self.fmt, {
                    'port_chains': {
                        pc1['port_chain']['id']: [pc2['port_chain']['id']],
                        pc3['port_chain']['id']: [pc4['port_chain']['id']],
                        pc4['port_chain']['id']: [pc2['port_chain']['id']]
                    },
                    'name': 'abc',
                    'description': 'def'
                }, expected_res_status=201)

    def _test_create_service_graph_single_branching_two_fcs_each(
        self, fc1_dict, fc2_dict, fc3_dict, fc4_dict, expected_res_status
    ):
        # Helper: one source chain branching into two destination chains,
        # each destination carrying two flow classifiers.
        with self.flow_classifier(
            flow_classifier=fc1_dict, do_delete=False
        ) as fc1, self.flow_classifier(
            flow_classifier=fc2_dict, do_delete=False
        ) as fc2, self.flow_classifier(
            flow_classifier=fc3_dict, do_delete=False
        ) as fc3, self.flow_classifier(
            flow_classifier=fc4_dict, do_delete=False
        ) as fc4:
            with self.port_pair_group(
                port_pair_group={}, do_delete=False
            ) as pg1, self.port_pair_group(
                port_pair_group={}, do_delete=False
            ) as pg2, self.port_pair_group(
                port_pair_group={}, do_delete=False
            ) as pg3:
                with self.port_chain(
                    port_chain={
                        'port_pair_groups': [pg1['port_pair_group']['id']]
                    },
                    do_delete=False
                ) as pc1, self.port_chain(
                    port_chain={
                        'port_pair_groups': [pg2['port_pair_group']['id']],
                        'flow_classifiers': [
                            fc1['flow_classifier']['id'],
                            fc2['flow_classifier']['id']
                        ]
                    },
                    do_delete=False
                ) as pc2, self.port_chain(
                    port_chain={
                        'port_pair_groups': [pg3['port_pair_group']['id']],
                        'flow_classifiers': [
                            fc3['flow_classifier']['id'],
                            fc4['flow_classifier']['id']
                        ]
                    },
                    do_delete=False
                ) as pc3:
                    self._create_service_graph(self.fmt, {
                        'port_chains': {
                            pc1['port_chain']['id']: [
                                pc2['port_chain']['id'],
                                pc3['port_chain']['id']
                            ]
                        },
                        'name': 'abc',
                        'description': 'def'
                    }, expected_res_status=expected_res_status)

    def test_create_service_graph_unambiguous_branch(self):
        # this test will create a graph where 1 port chain will act
        # as a dependency to 2 other port chains, using different
        # classifications for the dependent chains, which must succeed.
        with self.port(
            name='test1', do_delete=False
        ) as port1, self.port(
            name='test2', do_delete=False
        ) as port2, self.port(
            name='test3', do_delete=False
        ) as port3, self.port(
            name='test4', do_delete=False
        ) as port4:
            fc1_dict = {
                'name': 'fc1',
                'ethertype': 'IPv4',
                'protocol': 'tcp',
                'logical_source_port': port1['port']['id']
            }
            fc2_dict = {
                'name': 'fc2',
                'ethertype': 'IPv6',
                'protocol': 'tcp',
                'logical_source_port': port2['port']['id']
            }
            fc3_dict = {
                'name': 'fc3',
                'ethertype': 'IPv4',
                'protocol': 'udp',
                'logical_source_port': port3['port']['id']
            }
            fc4_dict = {
                'name': 'fc4',
                'ethertype': 'IPv6',
                'protocol': 'udp',
                'logical_source_port': port4['port']['id']
            }
            self._test_create_service_graph_single_branching_two_fcs_each(
                fc1_dict, fc2_dict, fc3_dict, fc4_dict,
                expected_res_status=201)

    def test_create_service_graph_with_direct_loop_fail(self):
        # this test will attempt to create a graph where there is a direct
        # loop, i.e. a chain linked to itself - specifically pc2->pc2.
        with self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg1, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg2:
            with self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
            ) as pc1, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
            ) as pc2:
                self._create_service_graph(self.fmt, {
                    'port_chains': {
                        pc1['port_chain']['id']: [pc2['port_chain']['id']],
                        pc2['port_chain']['id']: [pc2['port_chain']['id']]
                    },
                    'name': 'abc',
                    'description': 'def'
                }, expected_res_status=400)

    def test_create_service_graph_with_indirect_loop_fail(self):
        # this test will attempt to create a graph where there is an indirect
        # loop, i.e. a chain is linked to a chain providing a path back to
        # the first chain again - specifically pc2->pc3->pc4->pc2.
        with self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg1, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg2, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg3, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg4, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg5:
            with self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
            ) as pc1, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
            ) as pc2, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
            ) as pc3, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg4['port_pair_group']['id']]}
            ) as pc4, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg5['port_pair_group']['id']]}
            ) as pc5:
                self._create_service_graph(self.fmt, {
                    'port_chains': {
                        pc1['port_chain']['id']: [pc2['port_chain']['id']],
                        pc2['port_chain']['id']: [pc3['port_chain']['id']],
                        pc3['port_chain']['id']: [pc4['port_chain']['id']],
                        pc4['port_chain']['id']: [
                            pc2['port_chain']['id'],
                            pc5['port_chain']['id']
                        ]
                    },
                    'name': 'abc',
                    'description': 'def'
                }, expected_res_status=400)

    def test_create_service_graph_with_inexistent_port_chains(self):
        # this test will attempt to create a graph where one
        # of the referenced port chains does not exist, and fail.
        with self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg1, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg2, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg3:
            with self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
            ) as pc1, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
            ) as pc2, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
            ) as pc3:
                self._create_service_graph(self.fmt, {
                    'port_chains': {
                        pc1['port_chain']['id']: [pc2['port_chain']['id']],
                        pc2['port_chain']['id']: [
                            pc3['port_chain']['id'],
                            uuidutils.generate_uuid()
                        ]
                    },
                    'name': 'abc',
                    'description': 'def'
                }, expected_res_status=404)

    def test_create_service_graph_with_joining_branches(self):
        # this test will create a graph that includes "joining" branches, i.e.
        # a set of at least 2 branches that will be linked to the same next
        # port chain, thus joining traffic at that point.
        with self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg1, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg2, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg3, self.port_pair_group(
            port_pair_group={}, do_delete=False
        ) as pg4:
            with self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
            ) as pc1, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
            ) as pc2, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
            ) as pc3, self.port_chain(
                do_delete=False,
                port_chain={'port_pair_groups': [pg4['port_pair_group']['id']]}
            ) as pc4:
                self._create_service_graph(self.fmt, {
                    'port_chains': {
                        pc1['port_chain']['id']: [pc2['port_chain']['id']],
                        pc2['port_chain']['id']: [
                            pc3['port_chain']['id'],
                            pc4['port_chain']['id']
                        ],
                        pc3['port_chain']['id']: [pc4['port_chain']['id']]
                    },
                    'name': 'abc',
                    'description': 'def'
                }, expected_res_status=201)

    def test_update_service_graph(self):
        # name/description of a service graph are mutable.
        with self.port_pair_group(
            port_pair_group={}
        ) as pg1, self.port_pair_group(
            port_pair_group={}
        ) as pg2:
            with self.port_chain(
                port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
            ) as pc1, self.port_chain(
                port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
            ) as pc2:
                with self.service_graph(service_graph={
                    'name': 'test1',
                    'port_chains': {
                        pc1['port_chain']['id']: [pc2['port_chain']['id']]
                    }
                }) as graph:
                    updates = {
                        'name': 'test2',
                        'description': 'desc2'
                    }
                    req = self.new_update_request(
                        'service_graphs', {'service_graph': updates},
                        graph['service_graph']['id']
                    )
                    res = self.deserialize(
                        self.fmt, req.get_response(self.ext_api)
                    )
                    expected = graph['service_graph']
                    expected.update(updates)
                    for k, v in expected.items():
                        self.assertEqual(res['service_graph'][k], v)
                    req = self.new_show_request(
                        'service_graphs', graph['service_graph']['id']
                    )
                    res = self.deserialize(
                        self.fmt, req.get_response(self.ext_api)
                    )
                    for k, v in expected.items():
                        self.assertEqual(res['service_graph'][k], v)

    def test_delete_service_graph(self):
        # Deleting a graph removes only the graph; its port chains survive.
        with self.port_pair_group(
            port_pair_group={}
        ) as pg1, self.port_pair_group(
            port_pair_group={}
        ) as pg2:
            with self.port_chain(
                port_chain={
                    'port_pair_groups': [pg1['port_pair_group']['id']]},
            ) as pc1, self.port_chain(
                port_chain={
                    'port_pair_groups': [pg2['port_pair_group']['id']]},
            ) as pc2:
                with self.service_graph(service_graph={
                    'name': 'test1',
                    'port_chains': {
                        pc1['port_chain']['id']: [pc2['port_chain']['id']]
                    }
                }, do_delete=False) as graph:
                    req = self.new_delete_request(
                        'service_graphs', graph['service_graph']['id']
                    )
                    res = req.get_response(self.ext_api)
                    self.assertEqual(204, res.status_int)
                    req = self.new_show_request(
                        'service_graphs', graph['service_graph']['id']
                    )
                    res = req.get_response(self.ext_api)
                    self.assertEqual(404, res.status_int)
                    req = self.new_show_request(
                        'port_chains', pc1['port_chain']['id']
                    )
                    res = req.get_response(self.ext_api)
                    self.assertEqual(200, res.status_int)
                    req = self.new_show_request(
                        'port_chains', pc2['port_chain']['id']
                    )
                    res = req.get_response(self.ext_api)
                    self.assertEqual(200, res.status_int)
networking-sfc-10.0.0/networking_sfc/tests/unit/db/test_flowclassifier_db.py0000664000175000017500000017615613656750333027423 0ustar zuulzuul00000000000000# Copyright 2017 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging
from unittest import mock

from neutron.api import extensions as api_ext
from neutron.common import config
import neutron.extensions as nextensions
from neutron_lib import constants as lib_const
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import uuidutils
import webob.exc

from networking_sfc.db import flowclassifier_db as fdb
from networking_sfc import extensions
from networking_sfc.extensions import flowclassifier as fc_ext
from networking_sfc.tests import base

# Dotted path of the DB-backed flow classifier plugin under test.
DB_FLOWCLASSIFIER_PLUGIN_CLASS = (
    "networking_sfc.db.flowclassifier_db.FlowClassifierDbPlugin"
)

# Search path for API extensions: networking-sfc's plus neutron's.
extensions_path = ':'.join(extensions.__path__ + nextensions.__path__)


class FlowClassifierDbPluginTestCaseBase(base.BaseTestCase):
    # Shared helpers for creating flow classifiers through the API and
    # comparing API responses against expected attribute dictionaries.

    def _create_flow_classifier(
        self, fmt, flow_classifier=None, expected_res_status=None, **kwargs
    ):
        # Issue a create request; when no explicit context is given the
        # tenant_id is injected into the body instead.
        ctx = kwargs.get('context', None)
        tenant_id = kwargs.get('tenant_id', self._tenant_id)
        data = {'flow_classifier': flow_classifier or {}}
        if ctx is None:
            data['flow_classifier'].update({'tenant_id': tenant_id})
        req = self.new_create_request(
            'flow_classifiers', data, fmt, context=ctx
        )
        res = req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(expected_res_status, res.status_int)
        return res

    @contextlib.contextmanager
    def flow_classifier(
        self, fmt=None, flow_classifier=None, do_delete=True, **kwargs
    ):
        # Context manager yielding a created flow classifier; deletes it on
        # exit unless do_delete=False. Raises HTTPClientError on 4xx/5xx.
        if not fmt:
            fmt = self.fmt
        res = self._create_flow_classifier(fmt, flow_classifier, **kwargs)
        if res.status_int >= 400:
            logging.error('create flow classifier res: %s', res)
            raise webob.exc.HTTPClientError(
                code=res.status_int)
        flow_classifier = self.deserialize(fmt or self.fmt, res)
        yield flow_classifier
        if do_delete:
            self._delete('flow_classifiers',
                         flow_classifier['flow_classifier']['id'])

    def _get_expected_flow_classifier(self, flow_classifier):
        # Expand a request dict into the full attribute set the API is
        # expected to return, applying server-side defaults ('' names,
        # 'IPv4' ethertype, empty l7_parameters).
        expected_flow_classifier = {
            'name': flow_classifier.get('name') or '',
            'description': flow_classifier.get('description') or '',
            'source_port_range_min': flow_classifier.get(
                'source_port_range_min'),
            'source_port_range_max': flow_classifier.get(
                'source_port_range_max'),
            'destination_port_range_min': flow_classifier.get(
                'destination_port_range_min'),
            'destination_port_range_max': flow_classifier.get(
                'destination_port_range_max'),
            'source_ip_prefix': flow_classifier.get(
                'source_ip_prefix'),
            'destination_ip_prefix': flow_classifier.get(
                'destination_ip_prefix'),
            'logical_source_port': flow_classifier.get(
                'logical_source_port'),
            'logical_destination_port': flow_classifier.get(
                'logical_destination_port'),
            'ethertype': flow_classifier.get(
                'ethertype') or 'IPv4',
            'protocol': flow_classifier.get(
                'protocol'),
            'l7_parameters': flow_classifier.get(
                'l7_parameters') or {}
        }
        return expected_flow_classifier

    def _assert_flow_classifiers_match_subsets(self, flow_classifiers,
                                               subsets, sort_key=None):
        # Sort both lists
        if sort_key:
            flow_classifiers.sort(key=lambda fc: fc[sort_key])
            subsets.sort(key=lambda fc: fc[sort_key])
        for fc, subset in zip(flow_classifiers, subsets):
            # Get matching items from subset
            sub_fc = dict([(k, fc[k]) for k in subset.keys()
                           if k in fc.keys()])
            self.assertEqual(subset, sub_fc)

    def _test_create_flow_classifier(
        self, flow_classifier, expected_flow_classifier=None
    ):
        # Create and verify every expected attribute is present and equal.
        if expected_flow_classifier is None:
            expected_flow_classifier = self._get_expected_flow_classifier(
                flow_classifier)
        with self.flow_classifier(flow_classifier=flow_classifier) as fc:
            for k, v in expected_flow_classifier.items():
                self.assertIn(k, fc['flow_classifier'])
                self.assertEqual(fc['flow_classifier'][k], v)


class FlowClassifierDbPluginTestCase(
    base.NeutronDbPluginV2TestCase,
    FlowClassifierDbPluginTestCaseBase
):

    # Map every flow classifier resource onto its URI prefix.
    resource_prefix_map = dict(
        (k, fc_ext.FLOW_CLASSIFIER_PREFIX)
        for k in fc_ext.RESOURCE_ATTRIBUTE_MAP.keys()
    )

    def setUp(self, core_plugin=None, flowclassifier_plugin=None,
              ext_mgr=None):
        # Silence plugin logging and register quota options before the
        # plugin framework loads.
        mock_log_p = mock.patch.object(fdb, 'LOG')
        self.mock_log = mock_log_p.start()
        cfg.CONF.register_opts(fc_ext.flow_classifier_quota_opts, 'QUOTAS')
        if not flowclassifier_plugin:
            flowclassifier_plugin = DB_FLOWCLASSIFIER_PLUGIN_CLASS
        service_plugins = {
            fc_ext.FLOW_CLASSIFIER_EXT: flowclassifier_plugin
        }
        fdb.FlowClassifierDbPlugin.supported_extension_aliases = [
            fc_ext.FLOW_CLASSIFIER_EXT]
        fdb.FlowClassifierDbPlugin.path_prefix = (
            fc_ext.FLOW_CLASSIFIER_PREFIX
        )
        super(FlowClassifierDbPluginTestCase, self).setUp(
            ext_mgr=ext_mgr,
            plugin=core_plugin,
            service_plugins=service_plugins
        )
        if not ext_mgr:
            # Build an extension middleware wired to the plugin instance
            # so self.ext_api can serve flow classifier requests.
            self.flowclassifier_plugin = importutils.import_object(
                flowclassifier_plugin)
            ext_mgr = api_ext.PluginAwareExtensionManager(
                extensions_path,
                {fc_ext.FLOW_CLASSIFIER_EXT: self.flowclassifier_plugin}
            )
            app = config.load_paste_app('extensions_test_app')
            self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr)

    def test_create_flow_classifier(self):
        with self.port(
            name='test1'
        ) as port:
            self._test_create_flow_classifier({
                'logical_source_port': port['port']['id']
            })

    def test_create_flow_classifier_with_logical_destination_port(self):
        with self.port(
            name='test1'
        ) as src_port, self.port(
            name='test1'
        ) as dst_port:
            self._test_create_flow_classifier({
                'logical_source_port': src_port['port']['id'],
                'logical_destination_port': dst_port['port']['id']
            })

    def test_quota_create_flow_classifier(self):
        # With quota set to 3, the fourth create must fail with 409.
        cfg.CONF.set_override('quota_flow_classifier', 3, group='QUOTAS')
        with self.port(
            name='test1'
        ) as port:
            self._create_flow_classifier(
                self.fmt, {
                    'source_ip_prefix': '10.100.0.0/16',
                    'logical_source_port': port['port']['id']
                }, expected_res_status=201)
            self._create_flow_classifier(
                self.fmt, {
                    'source_ip_prefix': '10.101.0.0/16',
                    'logical_source_port': port['port']['id']
                }, expected_res_status=201)
            self._create_flow_classifier(
                self.fmt, {
                    'source_ip_prefix': '10.102.0.0/16',
                    'logical_source_port': port['port']['id']
                }, expected_res_status=201)
            self._create_flow_classifier(
                self.fmt, {
                    'source_ip_prefix': '10.103.0.0/16',
                    'logical_source_port': port['port']['id']
                }, expected_res_status=409)

    def test_create_flow_classifier_with_all_fields(self):
        with self.port(
            name='test1'
        ) as port:
            self._test_create_flow_classifier({
                'name': 'test1',
                'ethertype': lib_const.IPv4,
                'protocol': lib_const.PROTO_NAME_TCP,
                'source_port_range_min': 100,
                'source_port_range_max': 200,
                'destination_port_range_min': 101,
                'destination_port_range_max': 201,
                'source_ip_prefix': '10.100.0.0/16',
                'destination_ip_prefix': '10.200.0.0/16',
                'logical_source_port': port['port']['id'],
                'logical_destination_port': None,
                'l7_parameters': {}
            })

    def test_create_flow_classifier_with_all_supported_ethertype(self):
        with self.port(
            name='test1'
        ) as port:
            self._test_create_flow_classifier({
                'ethertype': None,
                'logical_source_port': port['port']['id']
            })
            self._test_create_flow_classifier({
                'ethertype': 'IPv4',
                'logical_source_port': port['port']['id']
            })
            self._test_create_flow_classifier({
                'ethertype': 'IPv6',
                'logical_source_port': port['port']['id']
            })

    def test_create_flow_classifier_with_invalid_ethertype(self):
        with self.port(
            name='test1'
        ) as port:
            self._create_flow_classifier(
                self.fmt, {
                    'ethertype': 'unsupported',
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )

    def test_create_flow_classifier_with_all_supported_protocol(self):
        with self.port(
            name='test1'
        ) as port:
            self._test_create_flow_classifier({
                'protocol': None,
                'logical_source_port': port['port']['id']
            })
            self._test_create_flow_classifier({
                'protocol': lib_const.PROTO_NAME_TCP,
                'logical_source_port': port['port']['id']
            })
            self._test_create_flow_classifier({
                'protocol': lib_const.PROTO_NAME_UDP,
                'logical_source_port': port['port']['id']
            })
            self._test_create_flow_classifier({
                'protocol': lib_const.PROTO_NAME_ICMP,
                'logical_source_port': port['port']['id']
            })

    def test_create_flow_classifier_with_invalid_protocol(self):
        with self.port(
            name='test1'
        ) as port:
            self._create_flow_classifier(
                self.fmt, {
                    'protocol': 'unsupported',
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )

    def test_create_flow_classifier_with_all_supported_port_protocol(self):
        # Port ranges may be omitted, equal, or given as numeric strings
        # (which the API normalizes to integers in the response).
        with self.port(
            name='test1'
        ) as port:
            self._test_create_flow_classifier({
                'source_port_range_min': None,
                'source_port_range_max': None,
                'destination_port_range_min': None,
                'destination_port_range_max': None,
                'protocol': lib_const.PROTO_NAME_TCP,
                'logical_source_port': port['port']['id']
            })
            self._test_create_flow_classifier({
                'source_port_range_min': 100,
                'source_port_range_max': 200,
                'destination_port_range_min': 100,
                'destination_port_range_max': 200,
                'protocol': lib_const.PROTO_NAME_TCP,
                'logical_source_port': port['port']['id']
            })
            self._test_create_flow_classifier({
                'source_port_range_min': 100,
                'source_port_range_max': 100,
                'destination_port_range_min': 100,
                'destination_port_range_max': 100,
                'protocol': lib_const.PROTO_NAME_TCP,
                'logical_source_port': port['port']['id']
            })
            self._test_create_flow_classifier({
                'source_port_range_min': '100',
                'source_port_range_max': '200',
                'destination_port_range_min': '100',
                'destination_port_range_max': '200',
                'protocol': lib_const.PROTO_NAME_UDP,
                'logical_source_port': port['port']['id']
            }, {
                'source_port_range_min': 100,
                'source_port_range_max': 200,
                'destination_port_range_min': 100,
                'destination_port_range_max': 200,
                'protocol': lib_const.PROTO_NAME_UDP,
                'logical_source_port': port['port']['id']
            })

    def test_create_flow_classifier_with_invalid_ip_prefix_ethertype(self):
        # IP prefix family must agree with the declared ethertype.
        with self.port(
            name='test1'
        ) as port:
            self._create_flow_classifier(
                self.fmt, {
                    'source_ip_prefix': '192.168.100.0/24',
                    'ethertype': 'IPv6',
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_ip_prefix': 'ff::0/24',
                    'ethertype': 'IPv4',
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )

    def test_create_flow_classifier_with_invalid_port_protocol(self):
        # Non-numeric, out-of-range, inverted, or protocol-less port ranges
        # are all rejected with 400.
        with self.port(
            name='test1'
        ) as port:
            self._create_flow_classifier(
                self.fmt, {
                    'source_port_range_min': 'abc',
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_port_range_max': 'abc',
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_port_range_min': 100,
                    'source_port_range_max': 99,
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_port_range_min': 65536,
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_port_range_max': 65536,
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_port_range_min': -1,
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_port_range_max': -1,
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_port_range_min': 'abc',
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_port_range_max': 'abc',
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_port_range_min': 100,
                    'destination_port_range_max': 99,
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_port_range_min': 65536,
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_port_range_max': 65536,
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_port_range_min': -1,
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_port_range_max': -1,
                    'protocol': lib_const.PROTO_NAME_TCP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_port_range_min': 100,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_port_range_max': 100,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_port_range_min': 100,
                    'source_port_range_max': 200,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_port_range_min': 100,
                    'source_port_range_max': 200,
                    'protocol': lib_const.PROTO_NAME_ICMP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_port_range_min': 100,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_port_range_max': 100,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_port_range_min': 100,
                    'destination_port_range_max': 200,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_port_range_min': 100,
                    'destination_port_range_max': 200,
                    'protocol': lib_const.PROTO_NAME_ICMP,
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )

    def test_create_flow_classifier_with_all_supported_ip_prefix(self):
        with self.port(
            name='test1'
        ) as port:
            self._test_create_flow_classifier({
                'source_ip_prefix': '192.168.100.0/24',
                'logical_source_port': port['port']['id']
            })
            self._test_create_flow_classifier({
                'destination_ip_prefix': '192.168.100.0/24',
                'logical_source_port': port['port']['id']
            })

    def test_create_flow_classifier_with_invalid_ip_prefix(self):
        # Malformed CIDRs (bad mask, extra octet, octet > 255, missing
        # prefix length) are rejected with 400.
        with self.port(
            name='test1'
        ) as port:
            self._create_flow_classifier(
                self.fmt, {
                    'source_ip_prefix': '10.0.0.0/34',
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_ip_prefix': '10.0.0.0.0/8',
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_ip_prefix': '256.0.0.0/8',
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'source_ip_prefix': '10.0.0.0',
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_ip_prefix': '10.0.0.0/34',
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_ip_prefix': '10.0.0.0.0/8',
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_ip_prefix': '256.0.0.0/8',
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )
            self._create_flow_classifier(
                self.fmt, {
                    'destination_ip_prefix': '10.0.0.0',
                    'logical_source_port': port['port']['id']
                },
                expected_res_status=400
            )

    def test_create_flow_classifier_with_all_supported_l7_parameters(self):
        with self.port(
            name='test1'
        ) as port:
            self._test_create_flow_classifier({
                'l7_parameters': None,
                'logical_source_port': port['port']['id']
            })
            self._test_create_flow_classifier({
                'l7_parameters': {},
                'logical_source_port': port['port']['id']
            })

    def test_create_flow_classifier_with_invalid_l7_parameters(self):
        with self.port(
name='test1' ) as port: self._create_flow_classifier( self.fmt, { 'l7_parameters': {'abc': 'def'}, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) def test_create_flow_classifier_with_port_id(self): with self.port( name='test1' ) as port, self.port( name='test1' ) as port2: self._test_create_flow_classifier({ 'logical_source_port': port['port']['id'], 'logical_destination_port': None, }) self._test_create_flow_classifier({ 'logical_source_port': port['port']['id'], 'logical_destination_port': port2['port']['id'], }) def test_create_flow_classifier_with_nouuid_port_id(self): with self.port( name='test1' ) as port: self._create_flow_classifier( self.fmt, { 'logical_source_port': 'abc' }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'logical_source_port': port['port']['id'], 'logical_destination_port': 'abc' }, expected_res_status=400 ) def test_create_flow_classifier_ethertype_conflict(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'ethertype': 'IPv4', 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._test_create_flow_classifier({ 'ethertype': 'IPv6', 'logical_source_port': port['port']['id'] }) with self.flow_classifier(flow_classifier={ 'name': 'test1', 'ethertype': 'IPv4', 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'ethertype': 'IPv4', 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._test_create_flow_classifier({ 'ethertype': 'IPv6', 'logical_source_port': port['port']['id'] }) with self.flow_classifier(flow_classifier={ 'name': 'test1', 'ethertype': 
'IPv6', 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'ethertype': 'IPv6', 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._test_create_flow_classifier({ 'logical_source_port': port['port']['id'] }) self._test_create_flow_classifier({ 'ethertype': 'IPv4', 'logical_source_port': port['port']['id'] }) def test_create_flow_classifier_protocol_conflict(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) with self.flow_classifier(flow_classifier={ 'name': 'test1', 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._test_create_flow_classifier({ 'protocol': lib_const.PROTO_NAME_UDP, 'logical_source_port': port['port']['id'] }) def test_create_flow_classifier_source_ip_prefix_conflict(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'source_ip_prefix': '10.100.0.0/16' }, expected_res_status=400 ) with self.flow_classifier(flow_classifier={ 'name': 'test1', 'source_ip_prefix': '10.100.0.0/16', 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'source_ip_prefix': '10.100.0.0/16', 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) 
self._create_flow_classifier( self.fmt, { 'source_ip_prefix': '10.100.100.0/24', 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_ip_prefix': '10.0.0.0/8', 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._test_create_flow_classifier({ 'source_ip_prefix': '10.101.0.0/16', 'logical_source_port': port['port']['id'] }) def test_create_flow_classifier_destination_ip_prefix_conflict(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'destination_ip_prefix': '10.100.0.0/16' }, expected_res_status=400 ) with self.flow_classifier(flow_classifier={ 'name': 'test1', 'destination_ip_prefix': '10.100.0.0/16', 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'destination_ip_prefix': '10.100.0.0/16', 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_ip_prefix': '10.100.100.0/24', 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_ip_prefix': '10.0.0.0/8', 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._test_create_flow_classifier({ 'destination_ip_prefix': '10.101.0.0/16', 'logical_source_port': port['port']['id'] }) def test_create_flow_classifier_source_port_range_conflict(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'source_port_range_min': 100, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_max': 200, 'protocol': 
lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_min': 100, 'source_port_range_max': 200, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) with self.flow_classifier(flow_classifier={ 'name': 'test1', 'source_port_range_min': 100, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_min': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_min': 101, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_max': 101, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._test_create_flow_classifier({ 'source_port_range_max': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) with self.flow_classifier(flow_classifier={ 'name': 'test1', 'source_port_range_max': 100, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_max': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) 
self._create_flow_classifier( self.fmt, { 'source_port_range_max': 101, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_min': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._test_create_flow_classifier({ 'source_port_range_min': 101, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) with self.flow_classifier(flow_classifier={ 'name': 'test1', 'source_port_range_min': 100, 'source_port_range_max': 200, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_min': 99, 'source_port_range_max': 201, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_min': 101, 'source_port_range_max': 199, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_min': 99, 'source_port_range_max': 199, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_min': 101, 'source_port_range_max': 201, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_min': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_min': 101, 'protocol': lib_const.PROTO_NAME_TCP, 
'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_max': 199, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'source_port_range_max': 201, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._test_create_flow_classifier({ 'source_port_range_min': 201, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) self._test_create_flow_classifier({ 'source_port_range_min': 201, 'source_port_range_max': 300, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) self._test_create_flow_classifier({ 'source_port_range_max': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) self._test_create_flow_classifier({ 'source_port_range_min': 50, 'source_port_range_max': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) def test_create_flow_classifier_destination_port_range_conflict(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'destination_port_range_min': 100, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_max': 200, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_min': 100, 'destination_port_range_max': 200, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'protocol': 
lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) with self.flow_classifier(flow_classifier={ 'name': 'test1', 'destination_port_range_min': 100, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_min': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_min': 101, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_max': 101, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._test_create_flow_classifier({ 'destination_port_range_max': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) with self.flow_classifier(flow_classifier={ 'name': 'test1', 'destination_port_range_max': 100, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_max': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_max': 101, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_min': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, 
expected_res_status=400 ) self._test_create_flow_classifier({ 'destination_port_range_min': 101, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) with self.flow_classifier(flow_classifier={ 'name': 'test1', 'destination_port_range_min': 100, 'destination_port_range_max': 200, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }): self._create_flow_classifier( self.fmt, { 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_min': 99, 'destination_port_range_max': 201, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_min': 101, 'destination_port_range_max': 199, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_min': 99, 'destination_port_range_max': 199, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_min': 101, 'destination_port_range_max': 201, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_min': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_min': 101, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._create_flow_classifier( self.fmt, { 'destination_port_range_max': 199, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) 
self._create_flow_classifier( self.fmt, { 'destination_port_range_max': 201, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }, expected_res_status=400 ) self._test_create_flow_classifier({ 'destination_port_range_min': 201, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) self._test_create_flow_classifier({ 'destination_port_range_min': 201, 'destination_port_range_max': 300, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) self._test_create_flow_classifier({ 'destination_port_range_max': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) self._test_create_flow_classifier({ 'destination_port_range_min': 50, 'destination_port_range_max': 99, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) def test_create_flow_classifier_not_all_fields_conflict(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'source_ip_prefix': '192.168.100.0/24', 'destination_ip_prefix': '192.168.101.0/24', 'logical_source_port': port['port']['id'] }): self._test_create_flow_classifier({ 'source_ip_prefix': '192.168.100.0/24', 'destination_ip_prefix': '192.168.102.0/24', 'logical_source_port': port['port']['id'] }) with self.flow_classifier(flow_classifier={ 'name': 'test1', 'source_port_range_min': 100, 'source_port_range_max': 200, 'destination_port_range_min': 100, 'destination_port_range_max': 200, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }): self._test_create_flow_classifier({ 'source_port_range_min': 100, 'source_port_range_max': 200, 'destination_port_range_min': 300, 'destination_port_range_max': 400, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) with self.flow_classifier(flow_classifier={ 'name': 'test1', 'source_ip_prefix': '192.168.100.0/24', 'source_port_range_min': 100, 
'source_port_range_max': 200, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }): self._test_create_flow_classifier({ 'source_ip_prefix': '192.168.100.0/24', 'source_port_range_min': 300, 'source_port_range_max': 400, 'protocol': lib_const.PROTO_NAME_TCP, 'logical_source_port': port['port']['id'] }) def test_create_flow_classifier_with_unknown_port_id(self): with self.port( name='test1' ) as port: self._create_flow_classifier( self.fmt, { 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': uuidutils.generate_uuid() }, expected_res_status=404 ) self._create_flow_classifier( self.fmt, { 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port['port']['id'], 'logical_destination_port': uuidutils.generate_uuid() }, expected_res_status=404 ) def test_list_flow_classifiers(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'source_ip_prefix': '10.100.0.0/16', 'logical_source_port': port['port']['id'] }) as fc1, self.flow_classifier(flow_classifier={ 'name': 'test2', 'source_ip_prefix': '10.101.0.0/16', 'logical_source_port': port['port']['id'] }) as fc2: fcs = [fc1, fc2] self._test_list_resources( 'flow_classifier', fcs ) def test_list_flow_classifiers_with_params(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'source_ip_prefix': '10.100.0.0/16', 'logical_source_port': port['port']['id'] }) as fc1, self.flow_classifier(flow_classifier={ 'name': 'test2', 'source_ip_prefix': '10.101.0.0/16', 'logical_source_port': port['port']['id'] }) as fc2: self._test_list_resources( 'flow_classifier', [fc1], query_params='name=test1' ) self._test_list_resources( 'flow_classifier', [fc2], query_params='name=test2' ) self._test_list_resources( 'flow_classifier', [], query_params='name=test3' ) def test_list_flow_classifiers_with_unknown_params(self): with self.port( name='test1' ) as port: with 
self.flow_classifier(flow_classifier={ 'name': 'test1', 'source_ip_prefix': '10.100.0.0/16', 'logical_source_port': port['port']['id'] }) as fc1, self.flow_classifier(flow_classifier={ 'name': 'test2', 'source_ip_prefix': '10.101.0.0/16', 'logical_source_port': port['port']['id'] }) as fc2: self._test_list_resources( 'flow_classifier', [fc1, fc2], query_params='hello=test3' ) def test_show_flow_classifier(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_source_port': port['port']['id'] }) as fc: req = self.new_show_request( 'flow_classifiers', fc['flow_classifier']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) for k, v in fc['flow_classifier'].items(): self.assertEqual(res['flow_classifier'][k], v) def test_show_flow_classifier_noexist(self): req = self.new_show_request( 'flow_classifiers', '1' ) res = req.get_response(self.ext_api) self.assertEqual(404, res.status_int) def test_update_flow_classifier(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'description': 'desc1', 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port['port']['id'] }) as fc: updates = { 'name': 'test2', 'description': 'desc2', } req = self.new_update_request( 'flow_classifiers', { 'flow_classifier': updates }, fc['flow_classifier']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) expected = fc['flow_classifier'] expected.update(updates) for k, v in expected.items(): self.assertEqual(res['flow_classifier'][k], v) req = self.new_show_request( 'flow_classifiers', fc['flow_classifier']['id'] ) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) for k, v in expected.items(): self.assertEqual(res['flow_classifier'][k], v) def _test_update_with_field( self, fc, updates, expected_status_code ): req = self.new_update_request( 'flow_classifiers', {'flow_classifier': updates}, 
fc['flow_classifier']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(expected_status_code, res.status_int) def test_update_flow_classifer_unsupported_fields(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'description': 'desc1', 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port['port']['id'] }) as fc: self._test_update_with_field( fc, {'ethertype': None}, 400) self._test_update_with_field( fc, {'protocol': None}, 400) self._test_update_with_field( fc, {'source_port_range_min': None}, 400) self._test_update_with_field( fc, {'source_port_range_max': None}, 400) self._test_update_with_field( fc, {'destination_port_range_min': None}, 400) self._test_update_with_field( fc, {'destination_port_range_max': None}, 400) self._test_update_with_field( fc, {'source_ip_prefix': '192.168.100.0/24'}, 400) self._test_update_with_field( fc, {'destination_ip_prefix': '192.168.100.0/24'}, 400) self._test_update_with_field( fc, {'l7_parameters': None}, 400) def test_delete_flow_classifier(self): with self.port( name='test1' ) as port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'source_ip_prefix': '192.168.100.0/24', 'logical_source_port': port['port']['id'] }, do_delete=False) as fc: req = self.new_delete_request( 'flow_classifiers', fc['flow_classifier']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(204, res.status_int) req = self.new_show_request( 'flow_classifiers', fc['flow_classifier']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(404, res.status_int) def test_delete_flow_classifier_noexist(self): req = self.new_delete_request( 'flow_classifiers', '1' ) res = req.get_response(self.ext_api) self.assertEqual(404, res.status_int) networking-sfc-10.0.0/networking_sfc/tests/functional/0000775000175000017500000000000013656750461023115 5ustar 
zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/functional/__init__.py0000664000175000017500000000000013656750333025212 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/functional/services/0000775000175000017500000000000013656750461024740 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/functional/services/__init__.py0000664000175000017500000000000013656750333027035 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/functional/services/sfc/0000775000175000017500000000000013656750461025513 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/functional/services/sfc/__init__.py0000664000175000017500000000000013656750333027610 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/functional/services/sfc/agent/0000775000175000017500000000000013656750461026611 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/functional/services/sfc/agent/__init__.py0000664000175000017500000000000013656750333030706 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/functional/services/sfc/agent/extensions/0000775000175000017500000000000013656750461031010 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/functional/services/sfc/agent/extensions/__init__.py0000664000175000017500000000000013656750333033105 0ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000networking-sfc-10.0.0/networking_sfc/tests/functional/services/sfc/agent/extensions/test_ovs_agent_sfc_extension.pynetworking-sfc-10.0.0/networking_sfc/tests/functional/services/sfc/agent/extensions/test_ovs_agent_s0000664000175000017500000000216213656750333034300 0ustar zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.tests.functional.agent.l2 import base class TestOVSAgentSfcExtension(base.OVSAgentTestFramework): def setUp(self): super(TestOVSAgentSfcExtension, self).setUp() self.config.set_override('extensions', ['sfc'], 'agent') self.agent = self.create_agent() def test_run(self): self.agent._report_state() agent_state = self.agent.state_rpc.report_state.call_args[0][1] self.assertEqual(['sfc'], agent_state['configurations']['extensions']) networking-sfc-10.0.0/networking_sfc/tests/functional/db/0000775000175000017500000000000013656750461023502 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/functional/db/__init__.py0000664000175000017500000000000013656750333025577 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/tests/functional/db/test_migrations.py0000664000175000017500000000441113656750333027265 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.tests.functional.db import test_migrations from neutron.tests.unit import testlib_api from networking_sfc.db.migration.models import head # EXTERNAL_TABLES should contain all names of tables that are not related to # current repo. EXTERNAL_TABLES = set(external.TABLES) VERSION_TABLE = 'alembic_version_sfc' class _TestModelsMigrationsSFC(test_migrations._TestModelsMigrations): def db_sync(self, engine): cfg.CONF.set_override('connection', engine.url, group='database') for conf in migration.get_alembic_configs(): self.alembic_config = conf self.alembic_config.neutron_config = cfg.CONF migration.do_alembic_command(conf, 'upgrade', 'heads') def get_metadata(self): return head.get_metadata() def include_object(self, object_, name, type_, reflected, compare_to): if type_ == 'table' and (name.startswith('alembic') or name == VERSION_TABLE or name in EXTERNAL_TABLES): return False if type_ == 'index' and reflected and name.startswith("idx_autoinc_"): return False return True class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin, _TestModelsMigrationsSFC, testlib_api.SqlTestCaseLight): pass class TestModelsMigrationsPostgresql(testlib_api.PostgreSQLTestCaseMixin, _TestModelsMigrationsSFC, testlib_api.SqlTestCaseLight): pass networking-sfc-10.0.0/networking_sfc/tests/functional/db/test_models.py0000664000175000017500000000162113656750333026374 0ustar zuulzuul00000000000000# Copyright 2016 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.tests.functional.db import test_models class TestDBCreation(test_models.TestDBCreation): def test_head_creation(self): super(TestDBCreation, self).test_head_creation() from networking_sfc.db.migration.models import head self._test_creation(head) networking-sfc-10.0.0/networking_sfc/tests/functional/test_service.py0000664000175000017500000000524113656750333026166 0ustar zuulzuul00000000000000# Copyright 2016 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import os.path import signal from oslo_config import cfg from neutron.common import utils from neutron.tests.functional import test_server from networking_sfc.services.flowclassifier.common import ( # noqa config as fc_config) # noqa from networking_sfc.services.sfc.common import ( # noqa config as sfc_config) # noqa class TestService(test_server.TestPluginWorker): def _fake_start(self): with open(self.temp_file, 'ab') as f: f.write(test_server.FAKE_START_MSG) def _fake_reset(self): with open(self.temp_file, 'ab') as f: f.write(test_server.FAKE_RESET_MSG) def _test_restart_service_on_sighup(self, service, workers=1): self._start_server(callback=service, workers=workers) os.kill(self.service_pid, signal.SIGHUP) expected_msg = ( test_server.FAKE_START_MSG * workers + test_server.FAKE_RESET_MSG * (workers + 1)) expected_size = len(expected_msg) utils.wait_until_true( lambda: (os.path.isfile(self.temp_file) and os.stat(self.temp_file).st_size == expected_size), timeout=5, sleep=0.1, exception=RuntimeError( "Timed out waiting for file %(filename)s to be created and " "its size become equal to %(size)s." % {'filename': self.temp_file, 'size': expected_size})) with open(self.temp_file, 'rb') as f: res = f.readline() self.assertEqual(expected_msg, res) def test_start(self): cfg.CONF.set_override( 'service_plugins', [ 'networking_sfc.services.flowclassifier.plugin.' 'FlowClassifierPlugin', 'networking_sfc.services.sfc.plugin.SfcPlugin' ] ) cfg.CONF.set_override( 'drivers', ['ovs'], group='flowclassifier' ) cfg.CONF.set_override( 'drivers', ['ovs'], group='sfc' ) super(TestService, self).test_start() networking-sfc-10.0.0/networking_sfc/tests/base.py0000664000175000017500000001255013656750333022240 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib from unittest import mock from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api import extensions as api_ext from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api as dhcp_rpc_log from neutron.api.v2 import resource as api_res_log from neutron import manager from neutron.notifiers import nova as nova_log from neutron.plugins.ml2 import db as ml2_db from neutron.plugins.ml2.drivers import type_flat from neutron.plugins.ml2.drivers import type_local from neutron.plugins.ml2.drivers import type_tunnel from neutron.plugins.ml2.drivers import type_vlan from neutron.plugins.ml2.drivers import type_vxlan # noqa from neutron.plugins.ml2 import managers as ml2_manager from neutron.plugins.ml2 import plugin as ml2_plugin from neutron import quota as quota_log from neutron.scheduler import dhcp_agent_scheduler as dhcp_agent_log from neutron_lib import constants as nl_const from oslo_config import cfg from oslo_utils import uuidutils from neutron.tests import base as n_base from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin class BaseTestCase(n_base.BaseTestCase): pass class NeutronDbPluginV2TestCase(test_db_plugin.NeutronDbPluginV2TestCase): def setUp(self, plugin=None, service_plugins=None, ext_mgr=None): self._mock_unnecessary_logging() if not plugin: plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' cfg.CONF.set_override('tenant_network_types', ['vxlan'], group='ml2') cfg.CONF.set_override( 'vni_ranges', ['1:1000'], group='ml2_type_vxlan') cfg.CONF.set_override( 'mechanism_drivers', ['openvswitch'], 
group='ml2') super(NeutronDbPluginV2TestCase, self).setUp( ext_mgr=ext_mgr, plugin=plugin, service_plugins=service_plugins ) self._tenant_id = uuidutils.generate_uuid() self._network = self._make_network( self.fmt, 'net1', True) self._subnet = self._make_subnet( self.fmt, self._network, gateway='10.0.0.1', cidr='10.0.0.0/24', ip_version=4 ) self._gateway = self._create_port( self.fmt, self._network['network']['id'], device_owner=nl_const.DEVICE_OWNER_ROUTER_INTF ) def _mock_unnecessary_logging(self): mock_log_sg_rpc_p = mock.patch.object(sg_rpc, 'LOG') self.mock_log_sg_rpc = mock_log_sg_rpc_p.start() mock_log_api_ext_p = mock.patch.object(api_ext, 'LOG') self.mock_log_api_ext = mock_log_api_ext_p.start() mock_log_dhcp_rpc_log_p = mock.patch.object(dhcp_rpc_log, 'LOG') self.mock_log_dhcp_rpc_log = mock_log_dhcp_rpc_log_p.start() mock_log_dhcp_rpc_log_p = mock.patch.object(dhcp_rpc_log, 'LOG') self.mock_log_dhcp_rpc_log = mock_log_dhcp_rpc_log_p.start() mock_log_api_res_log_p = mock.patch.object(api_res_log, 'LOG') self.mock_log_api_res_log = mock_log_api_res_log_p.start() mock_log_cfg_p = mock.patch.object(cfg, 'LOG') self.mock_log_cfg = mock_log_cfg_p.start() mock_log_manager_p = mock.patch.object(manager, 'LOG') self.mock_log_manager = mock_log_manager_p.start() mock_log_nova_p = mock.patch.object(nova_log, 'LOG') self.mock_log_nova = mock_log_nova_p.start() mock_log_ml2_db_p = mock.patch.object(ml2_db, 'LOG') self.mock_log_ml2_db = mock_log_ml2_db_p.start() mock_log_ml2_manager_p = mock.patch.object(ml2_manager, 'LOG') self.mock_log_ml2_manager = mock_log_ml2_manager_p.start() mock_log_plugin_p = mock.patch.object(ml2_plugin, 'LOG') self.mock_log_plugin = mock_log_plugin_p.start() mock_log_type_flat_p = mock.patch.object(type_flat, 'LOG') self.mock_log_type_flat = mock_log_type_flat_p.start() mock_log_type_local_p = mock.patch.object(type_local, 'LOG') self.mock_log_type_local = mock_log_type_local_p.start() mock_log_type_tunnel_p = mock.patch.object(type_tunnel, 
'LOG') self.mock_log_type_tunnel = mock_log_type_tunnel_p.start() mock_log_type_vlan_p = mock.patch.object(type_vlan, 'LOG') self.mock_log_type_vlan = mock_log_type_vlan_p.start() mock_log_quota_log_p = mock.patch.object(quota_log, 'LOG') self.mock_log_quota_log = mock_log_quota_log_p.start() mock_log_dhcp_agent_log_p = mock.patch.object(dhcp_agent_log, 'LOG') self.mock_log_dhcp_agent_log = mock_log_dhcp_agent_log_p.start() def tearDown(self): super(NeutronDbPluginV2TestCase, self).tearDown() @contextlib.contextmanager def port(self, fmt=None, **kwargs): net_id = self._network['network']['id'] port = self._make_port(fmt or self.fmt, net_id, **kwargs) yield port networking-sfc-10.0.0/networking_sfc/extensions/0000775000175000017500000000000013656750461022010 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/extensions/__init__.py0000664000175000017500000000000013656750333024105 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/extensions/tap.py0000664000175000017500000000430413656750333023145 0ustar zuulzuul00000000000000# Copyright (c) 2017 One Convergence Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api import converters as lib_converters from neutron_lib.api import extensions from neutron_lib import exceptions as neutron_exc from networking_sfc._i18n import _ from networking_sfc.extensions import sfc DEFAULT_TAP_ENABLED = False TAP_EXT = "networking-sfc-tap" class MultiplePortPairsInTapPPGNotSupported(neutron_exc.InvalidInput): """Multiple Port Pairs in Tap PPG not allowed""" message = _("Multiple port pairs in Tap port-pair-group is not allowed.") class ConsecutiveTapPPGNotSupported(neutron_exc.InvalidInput): """Unsupported Tap deployment""" message = _("Consecutive Tap PPG in port-chain is not supported now.") EXTENDED_ATTRIBUTES_2_0 = { 'port_pair_groups': { 'tap_enabled': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': DEFAULT_TAP_ENABLED, 'convert_to': lib_converters.convert_to_boolean } } } class Tap(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Networking SFC Passive Tap Service Function support" @classmethod def get_alias(cls): return TAP_EXT @classmethod def get_description(cls): return "Extension for Passive TAP Service Function support" @classmethod def get_updated(cls): return "2017-10-20T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} @classmethod def get_plugin_interface(cls): return sfc.SfcPluginBase networking-sfc-10.0.0/networking_sfc/extensions/flowclassifier.py0000664000175000017500000002525313656750333025403 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from abc import ABCMeta from abc import abstractmethod import six from neutron_lib.api import attributes as attr from neutron_lib.api import converters from neutron_lib.api import extensions from neutron_lib import constants as const from neutron_lib.db import constants as db_const from neutron_lib import exceptions as neutron_exc from neutron_lib.services import base as service_base from oslo_config import cfg from neutron.api import extensions as neutron_ext from neutron.api.v2 import resource_helper from networking_sfc._i18n import _ from networking_sfc import extensions as sfc_extensions cfg.CONF.import_opt('api_extensions_path', 'neutron.common.config') neutron_ext.append_api_extensions_path(sfc_extensions.__path__) FLOW_CLASSIFIER_EXT = "flow_classifier" FLOW_CLASSIFIER_PREFIX = "/sfc" fc_supported_protocols = [const.PROTO_NAME_TCP, const.PROTO_NAME_UDP, const.PROTO_NAME_ICMP] fc_supported_ethertypes = ['IPv4', 'IPv6'] SUPPORTED_L7_PARAMETERS = {} _l7_param_attrs = attr.AttributeInfo(SUPPORTED_L7_PARAMETERS) # Flow Classifier Exceptions class FlowClassifierNotFound(neutron_exc.NotFound): message = _("Flow Classifier %(id)s not found.") class FlowClassifierPortNotFound(neutron_exc.NotFound): message = _("Flow Classifier Neutron Port %(id)s not found.") class FlowClassifierInvalidPortRange(neutron_exc.InvalidInput): message = _("Invalid IP protocol port range. 
min_port_range=" "%(port_range_min)s must be lesser or equal to " "max_port_range=%(port_range_max)s.") class FlowClassifierInvalidPortValue(neutron_exc.InvalidInput): message = _("Flow Classifier has invalid port value %(port)s.") class FlowClassiferDuplicateInformation(neutron_exc.InvalidInput): message = _("Flow Classifier has duplicate information: " "Neutron Port id %(port_id)s and ip prefix %(ip_prefix)s.") class FlowClassifierInUse(neutron_exc.InUse): message = _("Flow Classifier %(id)s in use.") class FlowClassifierInConflict(neutron_exc.InvalidInput): message = _("Flow Classifier conflicts with " "another Flow Classifier %(id)s.") class FlowClassifierInvalidProtocol(neutron_exc.InvalidInput): message = _("Flow Classifier does not support protocol %(protocol)s. " "Supported protocol values are %(values)s.") class FlowClassifierInvalidEthertype(neutron_exc.InvalidInput): message = _("Flow Classifier does not support ethertype %(ethertype)s. " "Supported ethertype values are %(values)s.") class FlowClassifierProtocolRequiredWithPorts(neutron_exc.InvalidInput): message = _("IP protocol must be TCP or UDP, if port range is given.") class FlowClassifierIpPrefixFormatConflictWithEthertype( neutron_exc.InvalidInput ): message = _("IP prefix %(ip_prefix)s format conflicts with " "ethertype %(ethertype)s.") class FlowClassifierInvalidL7Parameter(neutron_exc.InvalidInput): message = _( "Invalid Flow Classifier parameters: %%(error_message)s. " "Supported flow classifier parameters are %(supported_parameters)s." 
) % {'supported_parameters': SUPPORTED_L7_PARAMETERS} def normalize_protocol(value): if value is None: return None if isinstance(value, six.string_types): if value.lower() in fc_supported_protocols: return value.lower() raise FlowClassifierInvalidProtocol( protocol=value, values=fc_supported_protocols) def normalize_ethertype(value): if value is None: return 'IPv4' if isinstance(value, six.string_types): for ether_type in fc_supported_ethertypes: if value.lower() == ether_type.lower(): return ether_type raise FlowClassifierInvalidEthertype( ethertype=value, values=fc_supported_ethertypes) def normalize_string(value): if value is None: return '' return value def normalize_port_value(port): if port is None: return None try: val = int(port) except (ValueError, TypeError): raise FlowClassifierInvalidPortValue(port=port) if 0 <= val <= 65535: return val else: raise FlowClassifierInvalidPortValue(port=port) def normalize_l7parameters(parameters): parameters = converters.convert_none_to_empty_dict(parameters) for key in parameters: if key not in SUPPORTED_L7_PARAMETERS: raise FlowClassifierInvalidL7Parameter( error_message='Unknown key %s.' 
% key) try: _l7_param_attrs.fill_post_defaults(parameters) attr.populate_project_info(SUPPORTED_L7_PARAMETERS) _l7_param_attrs.convert_values(parameters) except ValueError as error: raise FlowClassifierInvalidL7Parameter(error_message=str(error)) return parameters # Attribute Map RESOURCE_ATTRIBUTE_MAP = { 'flow_classifiers': { 'id': { 'allow_post': False, 'allow_put': False, 'is_visible': True, 'validate': {'type:uuid': None}, 'primary_key': True}, 'name': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'validate': {'type:string': db_const.NAME_FIELD_SIZE}, 'convert_to': normalize_string}, 'description': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'validate': {'type:string': db_const.DESCRIPTION_FIELD_SIZE}, 'convert_to': normalize_string}, 'tenant_id': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'required_by_policy': True}, 'ethertype': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'convert_to': normalize_ethertype}, 'protocol': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'convert_to': normalize_protocol}, 'source_port_range_min': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'convert_to': normalize_port_value}, 'source_port_range_max': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'convert_to': normalize_port_value}, 'destination_port_range_min': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'convert_to': normalize_port_value}, 'destination_port_range_max': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'convert_to': normalize_port_value}, 'source_ip_prefix': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'validate': {'type:subnet_or_none': None}}, 'destination_ip_prefix': { 'allow_post': True, 
'allow_put': False, 'is_visible': True, 'default': None, 'validate': {'type:subnet_or_none': None}}, 'logical_source_port': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'validate': {'type:uuid_or_none': None}}, 'logical_destination_port': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'validate': {'type:uuid_or_none': None}}, 'l7_parameters': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'validate': {'type:dict': None}, 'convert_to': normalize_l7parameters}, }, } flow_classifier_quota_opts = [ cfg.IntOpt('quota_flow_classifier', default=100, help=_('Maximum number of Flow Classifiers per tenant. ' 'A negative value means unlimited.')), ] cfg.CONF.register_opts(flow_classifier_quota_opts, 'QUOTAS') class Flowclassifier(extensions.ExtensionDescriptor): """Flow Classifier extension.""" @classmethod def get_name(cls): return "Flow Classifier" @classmethod def get_alias(cls): return FLOW_CLASSIFIER_EXT @classmethod def get_description(cls): return "Flow Classifier Extension." 
@classmethod def get_plugin_interface(cls): return FlowClassifierPluginBase @classmethod def get_updated(cls): return "2015-10-05T10:00:00-00:00" def update_attributes_map(self, attributes): super(Flowclassifier, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) plural_mappings['flow_classifiers'] = 'flow_classifier' return resource_helper.build_resource_info( plural_mappings, RESOURCE_ATTRIBUTE_MAP, FLOW_CLASSIFIER_EXT, register_quota=True) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP return {} @six.add_metaclass(ABCMeta) class FlowClassifierPluginBase(service_base.ServicePluginBase): def get_plugin_type(self): return FLOW_CLASSIFIER_EXT def get_plugin_description(self): return 'Flow Classifier plugin' @abstractmethod def create_flow_classifier(self, context, flow_classifier): pass @abstractmethod def update_flow_classifier(self, context, id, flow_classifier): pass @abstractmethod def delete_flow_classifier(self, context, id): pass @abstractmethod def get_flow_classifiers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abstractmethod def get_flow_classifier(self, context, id, fields=None): pass networking-sfc-10.0.0/networking_sfc/extensions/servicegraph.py0000775000175000017500000001630513656750333025052 0ustar zuulzuul00000000000000# Copyright 2017 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from abc import ABCMeta from abc import abstractmethod import six from neutron_lib.api import converters as lib_converters from neutron_lib.api import extensions from neutron_lib.api import validators as lib_validators from neutron_lib.db import constants as db_const from neutron_lib import exceptions as neutron_exc from oslo_config import cfg from oslo_utils import uuidutils from neutron.api import extensions as neutron_ext from neutron.api.v2 import resource_helper from networking_sfc._i18n import _ from networking_sfc import extensions as sfc_extensions from networking_sfc.extensions import sfc as ext_sfc cfg.CONF.import_opt('api_extensions_path', 'neutron.common.config') neutron_ext.append_api_extensions_path(sfc_extensions.__path__) SG_EXT = "service_graph" SG_PREFIX = ext_sfc.SFC_PREFIX SERVICE_GRAPH = 'service_graph' SERVICE_GRAPHS = '%ss' % SERVICE_GRAPH # NOTE(scsnow): move to neutron-lib def validate_list_of_allowed_values(data, allowed_values=None): if not isinstance(data, list): msg = _("'%s' is not a list") % data return msg illegal_values = set(data) - set(allowed_values) if illegal_values: msg = _("Illegal values in a list: %s") % ', '.join(illegal_values) return msg lib_validators.validators['type:list_of_allowed_values'] = \ validate_list_of_allowed_values class InvalidUUID(neutron_exc.InvalidInput): message = _( "An invalid UUID was specified: %%(error_message)s. 
" "Make sure only valid UUIDs are provided.") class ServiceGraphInvalidPortChains(neutron_exc.InUse): message = _("Some of the Port Chain(s): %(port_chains)s, " "are already in use by a Service Graph.") class ServiceGraphPortChainInUse(neutron_exc.InUse): message = _("Port Chain %(id)s in use.") class ServiceGraphNotFound(neutron_exc.NotFound): message = _("Service Graph %(id)s not found.") class ServiceGraphLoopDetected(neutron_exc.InvalidInput): message = _("Service Graph defined contains at least one port chain loop.") class ServiceGraphInconsistentEncapsulation(neutron_exc.InvalidInput): message = _("Service Graph may only connect port-chains " "sharing the same correlation.") class ServiceGraphImpossibleBranching(neutron_exc.InvalidInput): message = _("Service Graphs require source (branching) and destination " "port pair groups (their PPs) to have correlation enabled.") class ServiceGraphFlowClassifierInConflict(neutron_exc.InvalidInput): message = _("Flow Classifier %(fc1_id)s conflicts with Flow Classifier " "%(fc2_id)s on one of the branching points being created.") class ServiceGraphPortChainInConflict(neutron_exc.InvalidInput): message = _("Port Chain %(pc_id)s is duplicated on one " "of the branching points being created.") def normalize_service_graph(port_chains): port_chains = lib_converters.convert_none_to_empty_dict(port_chains) for key in port_chains: if uuidutils.is_uuid_like(key): for val in port_chains[key]: if not uuidutils.is_uuid_like(val): raise InvalidUUID( error_message='UUID of destination Port-Chain ' 'is invalid: %s.' % key) else: raise InvalidUUID( error_message='UUID of source Port-Chain' 'is invalid: %s.' 
% key) return port_chains RESOURCE_ATTRIBUTE_MAP = { SERVICE_GRAPHS: { 'id': { 'allow_post': False, 'allow_put': False, 'is_visible': True, 'validate': {'type:uuid': None}, 'primary_key': True}, 'name': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {'type:string': db_const.NAME_FIELD_SIZE}}, 'description': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {'type:string': db_const.DESCRIPTION_FIELD_SIZE}}, 'project_id': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'required_by_policy': True}, 'port_chains': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:dict': None}, 'convert_to': normalize_service_graph} } } service_graph_quota_opts = [ cfg.IntOpt('quota_service_graphs', default=10, help=_('maximum number of Service Graphs per project. ' 'a negative value means unlimited.')) ] cfg.CONF.register_opts(service_graph_quota_opts, 'QUOTAS') class Servicegraph(extensions.ExtensionDescriptor): """Service Graph extension.""" @classmethod def get_name(cls): return "Service Graph" @classmethod def get_alias(cls): return SG_EXT @classmethod def get_description(cls): return "Service Graph extension." 
@classmethod def get_updated(cls): return "2017-09-20T00:00:00-00:00" def update_attributes_map(self, attributes): super(Servicegraph, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) @classmethod def get_resources(cls): plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) plural_mappings['service_graphs'] = 'service_graph' return resource_helper.build_resource_info( plural_mappings, RESOURCE_ATTRIBUTE_MAP, ext_sfc.SFC_EXT, register_quota=True) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP return {} @six.add_metaclass(ABCMeta) class ServiceGraphPluginBase(object): def get_plugin_type(self): return SG_EXT def get_plugin_description(self): return 'SFC Service Graphs extension for networking-sfc.' @abstractmethod def create_service_graph(self, context, service_graph): pass @abstractmethod def update_service_graph(self, context, id, service_graph): pass @abstractmethod def delete_service_graph(self, context, id): pass @abstractmethod def get_service_graphs(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abstractmethod def get_service_graph(self, context, id, fields=None): pass networking-sfc-10.0.0/networking_sfc/extensions/sfc.py0000664000175000017500000004427013656750333023142 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from abc import ABCMeta from abc import abstractmethod import six from neutron_lib.api import converters as lib_converters from neutron_lib.api import extensions from neutron_lib.api import validators as lib_validators from neutron_lib.db import constants as db_const from neutron_lib import exceptions as neutron_exc from neutron_lib.services import base as service_base from oslo_config import cfg from neutron.api import extensions as neutron_ext from neutron.api.v2 import resource_helper from networking_sfc._i18n import _ from networking_sfc import extensions as sfc_extensions from networking_sfc.extensions import flowclassifier as ext_fc cfg.CONF.import_opt('api_extensions_path', 'neutron.common.config') neutron_ext.append_api_extensions_path(sfc_extensions.__path__) SFC_EXT = "sfc" SFC_PREFIX = "/sfc" # Default Chain Parameters DEFAULT_CHAIN_CORRELATION = 'mpls' DEFAULT_CHAIN_SYMMETRY = False DEFAULT_CHAIN_PARAMETERS = {'correlation': DEFAULT_CHAIN_CORRELATION, 'symmetric': DEFAULT_CHAIN_SYMMETRY} # Default SF Parameters DEFAULT_SF_PARAMETERS = {'correlation': None, 'weight': 1} # Default and Supported PPG Parameters DEFAULT_PPG_LB_FIELDS = [] DEFAULT_PPG_N_TUPLE = {'ingress_n_tuple': {}, 'egress_n_tuple': {}} DEFAULT_PPG_PARAMETERS = {'lb_fields': DEFAULT_PPG_LB_FIELDS, 'ppg_n_tuple_mapping': DEFAULT_PPG_N_TUPLE} SUPPORTED_LB_FIELDS = [ "eth_src", "eth_dst", "ip_src", "ip_dst", "tcp_src", "tcp_dst", "udp_src", "udp_dst" ] SUPPORTED_PPG_TUPLE_MAPPING = { 'source_ip_prefix': None, 'destination_ip_prefix': None, 'source_port_range_min': None, 'source_port_range_max': None, 'destination_port_range_min': None, 'destination_port_range_max': None, } MAX_CHAIN_ID = 65535 # NOTE(scsnow): move to neutron-lib def validate_list_of_allowed_values(data, allowed_values=None): if not isinstance(data, list): msg = _("'%s' is not a list") % data return msg illegal_values = set(data) - set(allowed_values) if illegal_values: msg = _("Illegal values in a list: %s") % ', 
'.join(illegal_values) return msg lib_validators.validators['type:list_of_allowed_values'] = \ validate_list_of_allowed_values # DEFAULT RESOURCE_ATTRIBUTE_MAP for ingress_n_tuple and egress_n_tuple in # ppg_n_tuple_mapping validate dict ppg_n_tuple_validact_dict = { 'source_ip_prefix': { 'default': None, 'validate': {'type:subnet_or_none': None} }, 'destination_ip_prefix': { 'default': None, 'validate': {'type:subnet_or_none': None} }, 'source_port_range_min': { 'default': None, 'convert_to': ext_fc.normalize_port_value }, 'source_port_range_max': { 'default': None, 'convert_to': ext_fc.normalize_port_value }, 'destination_port_range_min': { 'default': None, 'convert_to': ext_fc.normalize_port_value }, 'destination_port_range_max': { 'default': None, 'convert_to': ext_fc.normalize_port_value } } class PortChainNotFound(neutron_exc.NotFound): message = _("Port Chain %(id)s not found.") class PortChainUnavailableChainId(neutron_exc.InvalidInput): message = _("Port Chain %(id)s no available chain id.") class PortChainFlowClassifierInConflict(neutron_exc.InvalidInput): message = _("Flow Classifier %(fc_id)s conflicts with " "Flow Classifier %(pc_fc_id)s in port chain %(pc_id)s.") class PortChainChainIdInConflict(neutron_exc.InvalidInput): message = _("Chain id %(chain_id)s conflicts with " "Chain id in port chain %(pc_id)s.") class PortChainInconsistentCorrelations(neutron_exc.InvalidInput): message = _("Port Chain attempted creation included a Port Pair Group " "(%(ppg)s) with a different protocol used as correlation " "type.") class PortPairGroupNotSpecified(neutron_exc.InvalidInput): message = _("Port Pair Group is not specified in Port Chain.") class InconsistentCorrelations(neutron_exc.InvalidInput): message = _("Port Pair Group attempted creation included Port Pairs " "with inconsistent correlation types.") class InvalidPortPairGroups(neutron_exc.InUse): message = _("Port Pair Group(s) %(port_pair_groups)s in use by " "Port Chain %(port_chain)s.") class 
PortPairPortNotFound(neutron_exc.NotFound): message = _("Port Pair port %(id)s not found.") class PortPairIngressEgressDifferentHost(neutron_exc.InvalidInput): message = _("Port Pair ingress port %(ingress)s and " "egress port %(egress)s not in the same host.") class PortPairIngressNoHost(neutron_exc.InvalidInput): message = _("Port Pair ingress port %(ingress)s does not " "belong to a host.") class PortPairEgressNoHost(neutron_exc.InvalidInput): message = _("Port Pair egress port %(egress)s does not " "belong to a host.") class PortPairIngressEgressInUse(neutron_exc.InvalidInput): message = _("Port Pair with ingress port %(ingress)s " "and egress port %(egress)s is already used by " "another Port Pair %(id)s.") class PortPairNotFound(neutron_exc.NotFound): message = _("Port Pair %(id)s not found.") class PortPairGroupNotFound(neutron_exc.NotFound): message = _("Port Pair Group %(id)s not found.") class PortPairGroupInUse(neutron_exc.InUse): message = _("Port Pair Group %(id)s in use.") class PortPairInUse(neutron_exc.InUse): message = _("Port Pair %(id)s in use.") class PPGParametersInvalidNTupleMappingParameter(neutron_exc.InvalidInput): message = _( "Invalid Port Pair Group N-Tuple Mapping parameters: " "%%(error_message)s. Supported PPG classifier N-Tuple Mapping " "parameters are %(supported_parameters)s." 
) % {'supported_parameters': SUPPORTED_PPG_TUPLE_MAPPING} def normalize_port_pair_groups(port_pair_groups): port_pair_groups = lib_converters.convert_to_list(port_pair_groups) if not port_pair_groups: raise PortPairGroupNotSpecified() return port_pair_groups def normalize_chain_parameters(parameters): if not parameters: return DEFAULT_CHAIN_PARAMETERS if 'correlation' not in parameters: parameters['correlation'] = DEFAULT_CHAIN_CORRELATION if 'symmetric' not in parameters: parameters['symmetric'] = DEFAULT_CHAIN_SYMMETRY return parameters def normalize_sf_parameters(parameters): return parameters if parameters else DEFAULT_SF_PARAMETERS def normalize_ppg_parameters(parameters): if not parameters: return DEFAULT_PPG_PARAMETERS if 'lb_fields' not in parameters: parameters['lb_fields'] = DEFAULT_PPG_LB_FIELDS if 'ppg_n_tuple_mapping' not in parameters: parameters['ppg_n_tuple_mapping'] = DEFAULT_PPG_N_TUPLE if 'ppg_n_tuple_mapping' in parameters: for key, value in parameters['ppg_n_tuple_mapping'].items(): for n_key in value: if n_key not in SUPPORTED_PPG_TUPLE_MAPPING: raise PPGParametersInvalidNTupleMappingParameter( error_message='Unknow key %s.' 
% n_key) return parameters RESOURCE_ATTRIBUTE_MAP = { 'port_pairs': { 'id': { 'allow_post': False, 'allow_put': False, 'is_visible': True, 'validate': {'type:uuid': None}, 'primary_key': True }, 'name': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {'type:string': db_const.NAME_FIELD_SIZE}, }, 'description': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {'type:string': db_const.DESCRIPTION_FIELD_SIZE}, }, 'tenant_id': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'required_by_policy': True }, 'ingress': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:uuid': None} }, 'egress': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:uuid': None} }, 'service_function_parameters': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'validate': { 'type:dict': { 'correlation': { 'default': DEFAULT_SF_PARAMETERS['correlation'], 'type:values': [None, 'mpls', 'nsh'] }, 'weight': { 'default': DEFAULT_SF_PARAMETERS['weight'], 'type:non_negative': None, 'convert_to': lib_converters.convert_to_int } } }, 'convert_to': normalize_sf_parameters } }, 'port_chains': { 'id': { 'allow_post': False, 'allow_put': False, 'is_visible': True, 'validate': {'type:uuid': None}, 'primary_key': True }, 'chain_id': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': 0, 'validate': {'type:range': (0, MAX_CHAIN_ID)}, 'convert_to': lib_converters.convert_to_int }, 'name': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {'type:string': db_const.NAME_FIELD_SIZE}, }, 'description': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {'type:string': db_const.DESCRIPTION_FIELD_SIZE}, }, 'tenant_id': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 
'validate': {'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'required_by_policy': True }, 'port_pair_groups': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'validate': {'type:uuid_list': None}, 'convert_to': normalize_port_pair_groups }, 'flow_classifiers': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'validate': {'type:uuid_list': None}, 'convert_to': lib_converters.convert_to_list }, 'chain_parameters': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'validate': { 'type:dict': { 'correlation': { 'default': DEFAULT_CHAIN_PARAMETERS['correlation'], 'type:values': ['mpls', 'nsh'] }, 'symmetric': { 'default': DEFAULT_CHAIN_PARAMETERS['symmetric'], 'convert_to': lib_converters.convert_to_boolean } } }, 'convert_to': normalize_chain_parameters } }, 'port_pair_groups': { 'id': { 'allow_post': False, 'allow_put': False, 'is_visible': True, 'validate': {'type:uuid': None}, 'primary_key': True}, 'group_id': { 'allow_post': False, 'allow_put': False, 'is_visible': True }, 'name': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {'type:string': db_const.NAME_FIELD_SIZE}, }, 'description': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {'type:string': db_const.DESCRIPTION_FIELD_SIZE}, }, 'tenant_id': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'required_by_policy': True }, 'port_pairs': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'validate': {'type:uuid_list': None}, 'convert_to': lib_converters.convert_none_to_empty_list }, 'port_pair_group_parameters': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'validate': { 'type:dict': { 'lb_fields': { 'default': DEFAULT_PPG_PARAMETERS['lb_fields'], 'type:list_of_allowed_values': SUPPORTED_LB_FIELDS }, 'ppg_n_tuple_mapping': { 
'default': DEFAULT_PPG_PARAMETERS[ 'ppg_n_tuple_mapping'], 'validate': { 'type:dict': { 'ingress_n_tuple': { 'default': {}, 'validate': { 'type:dict': ppg_n_tuple_validact_dict } }, 'egress_n_tuple': { 'default': {}, 'validate': { 'type:dict': ppg_n_tuple_validact_dict } } } }, 'convert_to': lib_converters.convert_none_to_empty_dict } } }, 'convert_to': normalize_ppg_parameters } } } sfc_quota_opts = [ cfg.IntOpt('quota_port_chain', default=10, help=_('Maximum number of port chains per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_port_pair_group', default=10, help=_('maximum number of port pair group per tenant. ' 'a negative value means unlimited.')), cfg.IntOpt('quota_port_pair', default=100, help=_('maximum number of port pair per tenant. ' 'a negative value means unlimited.')) ] cfg.CONF.register_opts(sfc_quota_opts, 'QUOTAS') class Sfc(extensions.ExtensionDescriptor): """Service Function Chain extension.""" @classmethod def get_name(cls): return "Service Function Chaining" @classmethod def get_alias(cls): return SFC_EXT @classmethod def get_description(cls): return "Service Function Chain extension." 
@classmethod def get_plugin_interface(cls): return SfcPluginBase @classmethod def get_updated(cls): return "2015-10-05T10:00:00-00:00" def update_attributes_map(self, attributes): super(Sfc, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) plural_mappings['sfcs'] = 'sfc' return resource_helper.build_resource_info( plural_mappings, RESOURCE_ATTRIBUTE_MAP, SFC_EXT, register_quota=True) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP return {} @six.add_metaclass(ABCMeta) class SfcPluginBase(service_base.ServicePluginBase): def get_plugin_type(self): return SFC_EXT def get_plugin_description(self): return 'SFC service plugin for service chaining.' @abstractmethod def create_port_chain(self, context, port_chain): pass @abstractmethod def update_port_chain(self, context, id, port_chain): pass @abstractmethod def delete_port_chain(self, context, id): pass @abstractmethod def get_port_chains(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abstractmethod def get_port_chain(self, context, id, fields=None): pass @abstractmethod def create_port_pair_group(self, context, port_pair_group): pass @abstractmethod def update_port_pair_group(self, context, id, port_pair_group): pass @abstractmethod def delete_port_pair_group(self, context, id): pass @abstractmethod def get_port_pair_groups(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abstractmethod def get_port_pair_group(self, context, id, fields=None): pass @abstractmethod def create_port_pair(self, context, port_pair): pass @abstractmethod def update_port_pair(self, context, id, port_pair): pass @abstractmethod def delete_port_pair(self, context, id): pass @abstractmethod def 
get_port_pairs(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abstractmethod def get_port_pair(self, context, id, fields=None): pass networking-sfc-10.0.0/networking_sfc/db/0000775000175000017500000000000013656750461020176 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/db/__init__.py0000664000175000017500000000000013656750333022273 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/db/flowclassifier_db.py0000664000175000017500000003352113656750333024233 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm.collections import attribute_mapped_collection from sqlalchemy.orm import exc from neutron_lib import constants as const from neutron_lib.db import api as db_api from neutron_lib.db import model_base from neutron_lib.db import model_query from neutron_lib.db import utils as db_utils from neutron.db import models_v2 from networking_sfc.extensions import flowclassifier as fc_ext LOG = logging.getLogger(__name__) UUID_LEN = 36 class L7Parameter(model_base.BASEV2): """Represents a L7 parameter.""" __tablename__ = 'sfc_flow_classifier_l7_parameters' keyword = sa.Column(sa.String(255), primary_key=True) value = sa.Column(sa.String(255)) classifier_id = sa.Column( sa.String(36), sa.ForeignKey('sfc_flow_classifiers.id', ondelete='CASCADE'), primary_key=True) class FlowClassifier(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a v2 neutron flow classifier.""" __tablename__ = 'sfc_flow_classifiers' name = sa.Column(sa.String(255)) ethertype = sa.Column(sa.String(40)) protocol = sa.Column(sa.String(40)) description = sa.Column(sa.String(255)) source_port_range_min = sa.Column(sa.Integer) source_port_range_max = sa.Column(sa.Integer) destination_port_range_min = sa.Column(sa.Integer) destination_port_range_max = sa.Column(sa.Integer) source_ip_prefix = sa.Column(sa.String(255)) destination_ip_prefix = sa.Column(sa.String(255)) logical_source_port = sa.Column( sa.String(UUID_LEN), sa.ForeignKey('ports.id', ondelete='RESTRICT')) logical_destination_port = sa.Column( sa.String(UUID_LEN), sa.ForeignKey('ports.id', ondelete='RESTRICT')) l7_parameters = orm.relationship( L7Parameter, collection_class=attribute_mapped_collection('keyword'), cascade='all, delete-orphan') class FlowClassifierDbPlugin(fc_ext.FlowClassifierPluginBase): @classmethod def 
_check_port_range_valid(cls, port_range_min, port_range_max, protocol): if ( port_range_min is not None and port_range_max is not None and port_range_min > port_range_max ): raise fc_ext.FlowClassifierInvalidPortRange( port_range_min=port_range_min, port_range_max=port_range_max ) if port_range_min is not None or port_range_max is not None: if protocol not in [const.PROTO_NAME_TCP, const.PROTO_NAME_UDP]: raise fc_ext.FlowClassifierProtocolRequiredWithPorts() @classmethod def _check_ip_prefix_valid(cls, ip_prefix, ethertype): if ip_prefix is not None: ip = netaddr.IPNetwork(ip_prefix) if ethertype == 'IPv4' and ip.version == 4: pass elif ethertype == 'IPv6' and ip.version == 6: pass else: raise ( fc_ext.FlowClassifierIpPrefixFormatConflictWithEthertype( ip_prefix=ip_prefix, ethertype=ethertype ) ) @classmethod def _logical_port_conflict(cls, first_logical_port, second_logical_port): if first_logical_port is None or second_logical_port is None: return True return first_logical_port == second_logical_port @classmethod def _ip_prefix_conflict(cls, first_ip_prefix, second_ip_prefix): if first_ip_prefix is None or second_ip_prefix is None: return True first_ipset = netaddr.IPSet([first_ip_prefix]) second_ipset = netaddr.IPSet([second_ip_prefix]) return bool(first_ipset & second_ipset) @classmethod def _port_range_conflict( cls, first_port_range_min, first_port_range_max, second_port_range_min, second_port_range_max ): first_conflict = True second_conflict = True if ( first_port_range_min is not None and second_port_range_max is not None ): first_conflict = first_port_range_min <= second_port_range_max if ( first_port_range_max is not None and second_port_range_min is not None ): second_conflict = second_port_range_min <= first_port_range_max return first_conflict & second_conflict @classmethod def _protocol_conflict(cls, first_protocol, second_protocol): if first_protocol is None or second_protocol is None: return True return first_protocol == second_protocol 
@classmethod def _ethertype_conflict(cls, first_ethertype, second_ethertype): return first_ethertype == second_ethertype @classmethod def flowclassifier_basic_conflict( cls, first_flowclassifier, second_flowclassifier ): return all([ cls._ethertype_conflict( first_flowclassifier['ethertype'], second_flowclassifier['ethertype'] ), cls._protocol_conflict( first_flowclassifier['protocol'], second_flowclassifier['protocol'] ), cls._ip_prefix_conflict( first_flowclassifier['source_ip_prefix'], second_flowclassifier['source_ip_prefix'] ), cls._ip_prefix_conflict( first_flowclassifier['destination_ip_prefix'], second_flowclassifier['destination_ip_prefix'] ), cls._port_range_conflict( first_flowclassifier['source_port_range_min'], first_flowclassifier['source_port_range_max'], second_flowclassifier['source_port_range_min'], second_flowclassifier['source_port_range_max'] ), cls._port_range_conflict( first_flowclassifier['destination_port_range_min'], first_flowclassifier['destination_port_range_max'], second_flowclassifier['destination_port_range_min'], second_flowclassifier['destination_port_range_max'] ) ]) @classmethod def flowclassifier_conflict( cls, first_flowclassifier, second_flowclassifier ): return all([ cls.flowclassifier_basic_conflict( first_flowclassifier, second_flowclassifier ), cls._logical_port_conflict( first_flowclassifier['logical_source_port'], second_flowclassifier['logical_source_port'] ), cls._logical_port_conflict( first_flowclassifier['logical_destination_port'], second_flowclassifier['logical_destination_port'] ) ]) @log_helpers.log_method_call def create_flow_classifier(self, context, flow_classifier): fc = flow_classifier['flow_classifier'] project_id = fc['project_id'] l7_parameters = { key: L7Parameter(key, val) for key, val in fc['l7_parameters'].items()} ethertype = fc['ethertype'] protocol = fc['protocol'] source_port_range_min = fc['source_port_range_min'] source_port_range_max = fc['source_port_range_max'] 
self._check_port_range_valid(source_port_range_min, source_port_range_max, protocol) destination_port_range_min = fc['destination_port_range_min'] destination_port_range_max = fc['destination_port_range_max'] self._check_port_range_valid(destination_port_range_min, destination_port_range_max, protocol) source_ip_prefix = fc['source_ip_prefix'] self._check_ip_prefix_valid(source_ip_prefix, ethertype) destination_ip_prefix = fc['destination_ip_prefix'] self._check_ip_prefix_valid(destination_ip_prefix, ethertype) logical_source_port = fc['logical_source_port'] logical_destination_port = fc['logical_destination_port'] with db_api.CONTEXT_WRITER.using(context): if logical_source_port is not None: self._get_port(context, logical_source_port) if logical_destination_port is not None: self._get_port(context, logical_destination_port) query = model_query.query_with_hooks(context, FlowClassifier) for flow_classifier_db in query.all(): if self.flowclassifier_conflict( fc, flow_classifier_db ): raise fc_ext.FlowClassifierInConflict( id=flow_classifier_db['id'] ) flow_classifier_db = FlowClassifier( id=uuidutils.generate_uuid(), project_id=project_id, name=fc['name'], description=fc['description'], ethertype=ethertype, protocol=protocol, source_port_range_min=source_port_range_min, source_port_range_max=source_port_range_max, destination_port_range_min=destination_port_range_min, destination_port_range_max=destination_port_range_max, source_ip_prefix=source_ip_prefix, destination_ip_prefix=destination_ip_prefix, logical_source_port=logical_source_port, logical_destination_port=logical_destination_port, l7_parameters=l7_parameters ) context.session.add(flow_classifier_db) return self._make_flow_classifier_dict(flow_classifier_db) def _make_flow_classifier_dict(self, flow_classifier, fields=None): res = { 'id': flow_classifier['id'], 'name': flow_classifier['name'], 'description': flow_classifier['description'], 'project_id': flow_classifier['project_id'], 'ethertype': 
flow_classifier['ethertype'], 'protocol': flow_classifier['protocol'], 'source_port_range_min': flow_classifier['source_port_range_min'], 'source_port_range_max': flow_classifier['source_port_range_max'], 'destination_port_range_min': ( flow_classifier['destination_port_range_min']), 'destination_port_range_max': ( flow_classifier['destination_port_range_max']), 'source_ip_prefix': flow_classifier['source_ip_prefix'], 'destination_ip_prefix': flow_classifier[ 'destination_ip_prefix'], 'logical_source_port': flow_classifier['logical_source_port'], 'logical_destination_port': flow_classifier[ 'logical_destination_port'], 'l7_parameters': { param['keyword']: param['value'] for k, param in flow_classifier.l7_parameters.items() } } return db_utils.resource_fields(res, fields) @log_helpers.log_method_call def get_flow_classifiers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = db_utils.get_marker_obj(self, context, 'flow_classifier', limit, marker) return model_query.get_collection( context, FlowClassifier, self._make_flow_classifier_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) @log_helpers.log_method_call def get_flow_classifier(self, context, id, fields=None): flow_classifier = self._get_flow_classifier(context, id) return self._make_flow_classifier_dict(flow_classifier, fields) def _get_flow_classifier(self, context, id): try: return model_query.get_by_id(context, FlowClassifier, id) except exc.NoResultFound: raise fc_ext.FlowClassifierNotFound(id=id) def _get_port(self, context, id): try: return model_query.get_by_id(context, models_v2.Port, id) except exc.NoResultFound: raise fc_ext.FlowClassifierPortNotFound(id=id) @log_helpers.log_method_call def update_flow_classifier(self, context, id, flow_classifier): new_fc = flow_classifier['flow_classifier'] with db_api.CONTEXT_WRITER.using(context): old_fc = 
self._get_flow_classifier(context, id) old_fc.update(new_fc) return self._make_flow_classifier_dict(old_fc) @log_helpers.log_method_call def delete_flow_classifier(self, context, id): try: with db_api.CONTEXT_WRITER.using(context): fc = self._get_flow_classifier(context, id) context.session.delete(fc) except AssertionError: raise fc_ext.FlowClassifierInUse(id=id) except fc_ext.FlowClassifierNotFound: LOG.info("Deleting a non-existing flow classifier.") networking-sfc-10.0.0/networking_sfc/db/migration/0000775000175000017500000000000013656750461022167 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/db/migration/__init__.py0000664000175000017500000000000013656750333024264 0ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/db/migration/models/0000775000175000017500000000000013656750461023452 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/db/migration/models/head.py0000664000175000017500000000160213656750333024722 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.db import model_base

# Import every module that declares networking-sfc models so they register
# themselves on the shared declarative base before the metadata is read.
from networking_sfc.db import flowclassifier_db  # noqa
from networking_sfc.db import sfc_db  # noqa
from networking_sfc.services.sfc.drivers.ovs import db as ovs_db  # noqa


def get_metadata():
    """Return the combined metadata of all networking-sfc models.

    Used by the alembic migration machinery to compare models against
    the live database schema.
    """
    return model_base.BASEV2.metadata
"""add_ppg_n_tuple_mapping_column

Revision ID: 61832141fb82
Revises: 6185f1633a3d
Create Date: 2017-04-10 16:39:58.026839

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '61832141fb82'
down_revision = '6185f1633a3d'


def upgrade():
    """Add the ppg_n_tuple_mapping column to sfc_path_nodes."""
    # Wide string column so arbitrary n-tuple mappings can be serialized.
    mapping_column = sa.Column('ppg_n_tuple_mapping', sa.String(1024),
                               nullable=True)
    op.add_column('sfc_path_nodes', mapping_column)
# """modify_value_column_size_in_port_pair_group_params Revision ID: 8329e9be2d8a Revises: 61832141fb82 Create Date: 2017-04-19 15:13:29.833652 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '8329e9be2d8a' down_revision = '61832141fb82' def upgrade(): op.alter_column('sfc_port_pair_group_params', 'value', existing_type=sa.String(255), type_=sa.String(1024)) networking-sfc-10.0.0/networking_sfc/db/migration/alembic_migrations/versions/newton/0000775000175000017500000000000013656750461031201 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc/db/migration/alembic_migrations/versions/newton/contract/0000775000175000017500000000000013656750461033016 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000020700000000000011214 Lustar 00000000000000networking-sfc-10.0.0/networking_sfc/db/migration/alembic_migrations/versions/newton/contract/010308b06b49_rename_tenant_to_project.pynetworking-sfc-10.0.0/networking_sfc/db/migration/alembic_migrations/versions/newton/contract/0103080000664000175000017500000000704013656750333033473 0ustar zuulzuul00000000000000# Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """rename tenant to project Revision ID: 010308b06b49 Create Date: 2016-06-29 19:42:17.862721 """ from alembic import op import sqlalchemy as sa from sqlalchemy.engine import reflection # revision identifiers, used by Alembic. 
# revision identifiers, used by Alembic.
revision = '010308b06b49'
down_revision = '48072cb59133'
depends_on = ('5a475fc853e6',)

# Cached Inspector; building one per helper call would be wasteful.
_INSPECTOR = None


def get_inspector():
    """Return a cached Inspector bound to the current migration engine."""
    global _INSPECTOR
    if _INSPECTOR is None:
        _INSPECTOR = reflection.Inspector.from_engine(op.get_bind())
    return _INSPECTOR


def get_tables():
    """Returns hardcoded list of tables which have ``tenant_id`` column.

    The list is hard-coded to match the state of the schema when this
    upgrade script is run.

    Output retrieved by using:

    >>> metadata = head.get_metadata()
    >>> all_tables = metadata.sorted_tables
    >>> tenant_tables = []
    >>> for table in all_tables:
    ...     for column in table.columns:
    ...         if column.name == 'tenant_id':
    ...             tenant_tables.append((table, column))

    """
    return [
        'sfc_port_pair_groups',
        'sfc_port_pairs',
        'sfc_port_chains',
        'sfc_flow_classifiers',
        'sfc_portpair_details',
        'sfc_path_nodes',
    ]


def get_columns(table):
    """Returns list of columns for given table."""
    return get_inspector().get_columns(table)


def get_data():
    """Return every ``(table, column)`` pair whose column is tenant_id."""
    return [(table, column)
            for table in get_tables()
            for column in get_columns(table)
            if column['name'] == 'tenant_id']


def alter_column(table, column):
    # Rename tenant_id -> project_id, keeping type and nullability intact.
    op.alter_column(
        table_name=table,
        column_name='tenant_id',
        new_column_name='project_id',
        existing_type=column['type'],
        existing_nullable=column['nullable'],
    )


def recreate_index(index, table_name):
    # Index renames are not portable across backends: drop and re-create.
    old_name = index['name']
    new_name = old_name.replace('tenant', 'project')
    op.drop_index(op.f(old_name), table_name)
    op.create_index(new_name, table_name, ['project_id'])


def upgrade():
    """Rename every tenant_id column (and its index) to project_id."""
    inspector = get_inspector()
    for table, column in get_data():
        alter_column(table, column)
        for index in inspector.get_indexes(table):
            if 'tenant_id' in index['name']:
                recreate_index(index, table)


def contract_creation_exceptions():
    """Special migration for the blueprint to support Keystone V3.

    We drop all tenant_id columns and create project_id columns instead.
    """
    return {
        sa.Column: ['.'.join([table, 'project_id'])
                    for table in get_tables()],
        sa.Index: get_tables()
    }
"""fix foreign constraints

Revision ID: 06382790fb2c
Create Date: 2016-08-11 14:45:34.416120

"""

from alembic import op
from sqlalchemy.engine import reflection

from neutron.db import migration

# revision identifiers, used by Alembic.
revision = '06382790fb2c'
down_revision = '010308b06b49'


def upgrade():
    """Recreate association-table FKs so child rows cascade on delete."""
    inspector = reflection.Inspector.from_engine(op.get_bind())
    # Association table -> FK column that must gain ON DELETE CASCADE.
    fks_to_cascade = {
        'sfc_flow_classifier_l7_parameters': 'classifier_id',
        'sfc_chain_group_associations': 'portchain_id',
        'sfc_port_chain_parameters': 'chain_id',
        'sfc_service_function_params': 'pair_id',
        'sfc_chain_classifier_associations': 'portchain_id'
    }
    for table, column in fks_to_cascade.items():
        fk_constraints = inspector.get_foreign_keys(table)
        for fk in fk_constraints:
            if column not in fk['constrained_columns']:
                continue
            fk['options']['ondelete'] = 'CASCADE'
            migration.remove_foreign_keys(table, fk_constraints)
            migration.create_foreign_keys(table, fk_constraints)
"""start networking-sfc chain

Revision ID: start_networking_sfc
Revises: None
Create Date: 2015-09-10 18:42:08.262632

"""

# revision identifiers, used by Alembic.
revision = 'start_networking_sfc'
down_revision = None


def upgrade():
    """No-op: this revision only anchors the networking-sfc chain."""
    pass
"""Add correlation column to sfc_portpair_details

Revision ID: 6185f1633a3d
Revises: b3adaf631bab
Create Date: 2017-02-19 00:00:00.000000

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '6185f1633a3d'
down_revision = 'b3adaf631bab'


def upgrade():
    """Record a correlation value per port-pair detail row.

    Fix: the original module docstring was the cookiecutter placeholder
    "description of revision"; it now describes what the migration does.
    """
    op.add_column('sfc_portpair_details',
                  sa.Column('correlation', sa.String(length=255),
                            nullable=True))
# revision identifiers, used by Alembic.
revision = 'b3adaf631bab'
down_revision = 'fa75d46a7f11'


def upgrade():
    """Add a path-direction flag and an ingress MAC column."""
    # NOTE(review): fwd_path is NOT NULL with no server_default; this
    # appears to assume sfc_path_nodes is empty at upgrade time -- confirm.
    op.add_column(
        'sfc_path_nodes',
        sa.Column('fwd_path', sa.Boolean(), nullable=False))
    op.add_column(
        'sfc_portpair_details',
        sa.Column('in_mac_address', sa.String(length=32), nullable=True))
# revision identifiers, used by Alembic.
revision = '24fc7241aa5'
down_revision = 'start_networking_sfc'
branch_labels = (cli.EXPAND_BRANCH,)


def upgrade():
    """No-op: marks the start of the expand branch for Mitaka."""
    pass
# revision identifiers, used by Alembic.
revision = 'c3e178d4a985'
down_revision = '9768e6a66c9'


def upgrade():
    """Create the core port-chain tables and their association tables."""
    # Groups of port pairs; chains reference groups, not pairs directly.
    op.create_table(
        'sfc_port_pair_groups',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('tenant_id', sa.String(length=255),
                  nullable=True, index=True),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    # An ingress/egress pair of Neutron ports forming one service function;
    # RESTRICT FKs keep ports and groups alive while pairs reference them.
    op.create_table(
        'sfc_port_pairs',
        sa.Column('tenant_id', sa.String(length=255),
                  nullable=True, index=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.Column('ingress', sa.String(length=36), nullable=False),
        sa.Column('egress', sa.String(length=36), nullable=False),
        sa.Column('portpairgroup_id', sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(['egress'], ['ports.id'],
                                ondelete='RESTRICT'),
        sa.ForeignKeyConstraint(['ingress'], ['ports.id'],
                                ondelete='RESTRICT'),
        sa.ForeignKeyConstraint(['portpairgroup_id'],
                                ['sfc_port_pair_groups.id'],
                                ondelete='RESTRICT'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('ingress', 'egress',
                            name='uniq_sfc_port_pairs0ingress0egress')
    )
    op.create_table(
        'sfc_port_chains',
        sa.Column('tenant_id', sa.String(length=255),
                  nullable=True, index=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    # Chain <-> group association; 'position' orders groups in the chain.
    op.create_table(
        'sfc_chain_group_associations',
        sa.Column('portpairgroup_id', sa.String(length=36), nullable=False),
        sa.Column('portchain_id', sa.String(length=36), nullable=False),
        sa.Column('position', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['portchain_id'], ['sfc_port_chains.id'], ),
        sa.ForeignKeyConstraint(['portpairgroup_id'],
                                ['sfc_port_pair_groups.id'],
                                ondelete='RESTRICT'),
        sa.PrimaryKeyConstraint('portpairgroup_id',
                                'portchain_id')
    )
    # Free-form key/value parameters attached to a chain.
    op.create_table(
        'sfc_port_chain_parameters',
        sa.Column('keyword', sa.String(length=255), nullable=False),
        sa.Column('value', sa.String(length=255), nullable=True),
        sa.Column('chain_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['chain_id'], ['sfc_port_chains.id'], ),
        sa.PrimaryKeyConstraint('keyword', 'chain_id')
    )
    # Free-form key/value parameters attached to a port pair.
    op.create_table(
        'sfc_service_function_params',
        sa.Column('keyword', sa.String(length=255), nullable=False),
        sa.Column('value', sa.String(length=255), nullable=True),
        sa.Column('pair_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['pair_id'], ['sfc_port_pairs.id'], ),
        sa.PrimaryKeyConstraint('keyword', 'pair_id')
    )
    # Chain <-> flow-classifier association; the UniqueConstraint means a
    # classifier can belong to at most one chain.
    op.create_table(
        'sfc_chain_classifier_associations',
        sa.Column('flowclassifier_id', sa.String(length=36), nullable=False),
        sa.Column('portchain_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['flowclassifier_id'],
                                ['sfc_flow_classifiers.id'],
                                ondelete='RESTRICT'),
        sa.ForeignKeyConstraint(['portchain_id'], ['sfc_port_chains.id'], ),
        sa.PrimaryKeyConstraint('flowclassifier_id',
                                'portchain_id'),
        sa.UniqueConstraint('flowclassifier_id')
    )
"""add_port_pair_group_params

Revision ID: fa75d46a7f11
Revises: d1002a1f97f6
Create Date: 2016-07-03 10:15:29.371910

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = 'fa75d46a7f11'
down_revision = 'd1002a1f97f6'


def upgrade():
    """Add PPG parameter table and integer chain/group identifiers."""
    # Key/value parameters per port-pair-group; rows are removed with the
    # owning group (ON DELETE CASCADE).
    op.create_table('sfc_port_pair_group_params',
                    sa.Column('keyword', sa.String(length=255),
                              nullable=False),
                    sa.Column('value', sa.String(length=255),
                              nullable=True),
                    sa.Column('pair_group_id', sa.String(length=36),
                              nullable=False),
                    sa.ForeignKeyConstraint(['pair_group_id'],
                                            ['sfc_port_pair_groups.id'],
                                            ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('keyword', 'pair_group_id'),
                    mysql_engine='InnoDB'
                    )
    # Integer identifiers, unique per chain and per group.
    op.add_column('sfc_port_chains',
                  sa.Column('chain_id', sa.Integer(), nullable=False))
    op.create_unique_constraint(None, 'sfc_port_chains', ['chain_id'])
    op.add_column('sfc_port_pair_groups',
                  sa.Column('group_id', sa.Integer(), nullable=False))
    op.create_unique_constraint(None, 'sfc_port_pair_groups', ['group_id'])
"""Defining OVS data-model

Revision ID: 5a475fc853e6
Revises: c3e178d4a985
Create Date: 2015-09-30 18:00:57.758762

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '5a475fc853e6'
down_revision = 'c3e178d4a985'


def upgrade():
    """Create the OVS driver's internal tables."""
    # Per-host detail record for a port pair as seen by the OVS driver.
    op.create_table('sfc_portpair_details',
                    sa.Column('tenant_id', sa.String(length=255),
                              nullable=True),
                    sa.Column('id', sa.String(length=36), nullable=False),
                    sa.Column('ingress', sa.String(length=36),
                              nullable=True),
                    sa.Column('egress', sa.String(length=36),
                              nullable=True),
                    sa.Column('host_id', sa.String(length=255),
                              nullable=False),
                    sa.Column('mac_address', sa.String(length=32),
                              nullable=False),
                    sa.Column('network_type', sa.String(length=8),
                              nullable=True),
                    sa.Column('segment_id', sa.Integer(), nullable=True),
                    sa.Column('local_endpoint', sa.String(length=64),
                              nullable=False),
                    sa.PrimaryKeyConstraint('id')
                    )
    op.create_index(
        op.f('ix_sfc_portpair_details_tenant_id'),
        'sfc_portpair_details', ['tenant_id'], unique=False
    )
    # Maps API-level UUIDs to unique integer ids ('intid'), keyed by type_.
    op.create_table('sfc_uuid_intid_associations',
                    sa.Column('id', sa.String(length=36), nullable=False),
                    sa.Column('uuid', sa.String(length=36), nullable=False),
                    sa.Column('intid', sa.Integer(), nullable=False),
                    sa.Column('type_', sa.String(length=32),
                              nullable=False),
                    sa.PrimaryKeyConstraint('id', 'uuid'),
                    sa.UniqueConstraint('intid')
                    )
    # One hop of a rendered service path; rows die with the port chain.
    op.create_table('sfc_path_nodes',
                    sa.Column('tenant_id', sa.String(length=255),
                              nullable=True),
                    sa.Column('id', sa.String(length=36), nullable=False),
                    sa.Column('nsp', sa.Integer(), nullable=False),
                    sa.Column('nsi', sa.Integer(), nullable=False),
                    sa.Column('node_type', sa.String(length=32),
                              nullable=True),
                    sa.Column('portchain_id', sa.String(length=255),
                              nullable=True),
                    sa.Column('status', sa.String(length=32),
                              nullable=True),
                    sa.Column('next_group_id', sa.Integer(), nullable=True),
                    sa.Column('next_hop', sa.String(length=512),
                              nullable=True),
                    sa.ForeignKeyConstraint(['portchain_id'],
                                            ['sfc_port_chains.id'],
                                            ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('id')
                    )
    op.create_index(
        op.f('ix_sfc_path_nodes_tenant_id'),
        'sfc_path_nodes', ['tenant_id'], unique=False
    )
    # Weighted association between a path node and its port pairs; rows
    # cascade away with either side.
    op.create_table('sfc_path_port_associations',
                    sa.Column('pathnode_id', sa.String(length=36),
                              nullable=False),
                    sa.Column('portpair_id', sa.String(length=36),
                              nullable=False),
                    sa.Column('weight', sa.Integer(), nullable=False),
                    sa.ForeignKeyConstraint(['pathnode_id'],
                                            ['sfc_path_nodes.id'],
                                            ondelete='CASCADE'),
                    sa.ForeignKeyConstraint(['portpair_id'],
                                            ['sfc_portpair_details.id'],
                                            ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('pathnode_id', 'portpair_id')
                    )
"""Defining flow-classifier data-model Revision ID: 9768e6a66c9 Revises: 24fc7241aa5 Create Date: 2015-09-30 17:54:35.852573 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '9768e6a66c9' down_revision = '24fc7241aa5' def upgrade(): op.create_table( 'sfc_flow_classifiers', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('ethertype', sa.String(length=40), nullable=True), sa.Column('protocol', sa.String(length=40), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('source_port_range_min', sa.Integer(), nullable=True), sa.Column('source_port_range_max', sa.Integer(), nullable=True), sa.Column('destination_port_range_min', sa.Integer(), nullable=True), sa.Column('destination_port_range_max', sa.Integer(), nullable=True), sa.Column('source_ip_prefix', sa.String(length=255), nullable=True), sa.Column('destination_ip_prefix', sa.String(length=255), nullable=True), sa.Column('logical_source_port', sa.String(length=36), nullable=False), sa.Column('logical_destination_port', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['logical_source_port'], ['ports.id'], ondelete='RESTRICT'), sa.ForeignKeyConstraint(['logical_destination_port'], ['ports.id'], ondelete='RESTRICT'), sa.PrimaryKeyConstraint('id') ) op.create_table( 'sfc_flow_classifier_l7_parameters', sa.Column('keyword', sa.String(length=255), nullable=False), sa.Column('value', sa.String(length=255), nullable=True), sa.Column('classifier_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['classifier_id'], ['sfc_flow_classifiers.id'], ), sa.PrimaryKeyConstraint('keyword', 'classifier_id') ) ././@LongLink0000000000000000000000000000020300000000000011210 Lustar 
"""update flow classifier

Revision ID: d1002a1f97f6
Revises: 5a475fc853e6
Create Date: 2016-06-03 10:23:52.850934

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = 'd1002a1f97f6'
down_revision = '5a475fc853e6'


def upgrade():
    """Relax sfc_flow_classifiers.logical_source_port to allow NULL."""
    op.alter_column(
        'sfc_flow_classifiers',
        'logical_source_port',
        nullable=True,
        existing_type=sa.String(length=36),
        existing_nullable=False,
    )
"""Initial Mitaka no-op script.

Revision ID: 48072cb59133
Revises: start_networking_sfc
Create Date: 2015-07-28 22:18:13.330846

"""

from neutron.db.migration import cli

# revision identifiers, used by Alembic.
revision = '48072cb59133'
down_revision = 'start_networking_sfc'
branch_labels = (cli.CONTRACT_BRANCH,)


def upgrade():
    """No-op: marks the start of the contract branch."""
    pass
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """extra attributes for pathnode Revision ID: a3ad63aa834f Revises: 8329e9be2d8a Create Date: 2017-08-03 13:57:59.908621 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'a3ad63aa834f' down_revision = 'd6fb381b65f2' def upgrade(): op.add_column('sfc_path_nodes', sa.Column('tap_enabled', sa.Boolean(), nullable=False, server_default=sa.sql.false())) op.add_column('sfc_path_nodes', sa.Column('previous_node_id', sa.String(length=36))) op.create_foreign_key('node_fk', 'sfc_path_nodes', 'sfc_path_nodes', ['previous_node_id'], ['id'], ondelete='SET NULL') ././@LongLink0000000000000000000000000000022200000000000011211 Lustar 00000000000000networking-sfc-10.0.0/networking_sfc/db/migration/alembic_migrations/versions/queens/expand/d6fb381b65f2_tap_enabled_attribute_port_pair_group.pynetworking-sfc-10.0.0/networking_sfc/db/migration/alembic_migrations/versions/queens/expand/d6fb381b0000664000175000017500000000222413656750333033606 0ustar zuulzuul00000000000000# Copyright (c) 2017 One Convergence Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """add tap_enabled attribute to port-pair-group Revision ID: d6fb381b65f2 Revises: a3ad63aa834f Create Date: 2017-08-03 13:57:59.908621 """ # revision identifiers, used by Alembic. revision = 'd6fb381b65f2' down_revision = '53ed5bec6cff' def upgrade(): op.add_column('sfc_port_pair_groups', sa.Column('tap_enabled', sa.Boolean, server_default=sa.sql.false(), nullable=False) ) ././@LongLink0000000000000000000000000000021300000000000011211 Lustar 00000000000000networking-sfc-10.0.0/networking_sfc/db/migration/alembic_migrations/versions/queens/expand/53ed5bec6cff_add_service_graph_api_resource.pynetworking-sfc-10.0.0/networking_sfc/db/migration/alembic_migrations/versions/queens/expand/53ed5bec0000664000175000017500000000600513656750333033667 0ustar zuulzuul00000000000000# Copyright 2017 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add Service Graph API resource Revision ID: 53ed5bec6cff Revises: 8329e9be2d8a Create Date: 2017-05-24 00:00:00.000000 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '53ed5bec6cff' down_revision = '8329e9be2d8a' def upgrade(): op.create_table('sfc_service_graphs', sa.Column('project_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_engine='InnoDB') op.create_index(op.f('ix_sfc_service_graphs_project_id'), 'sfc_service_graphs', ['project_id'], unique=False) op.create_table('sfc_service_graph_chain_associations', sa.Column('service_graph_id', sa.String(length=36), nullable=False), sa.Column('src_chain', sa.String(length=36), nullable=False), sa.Column('dst_chain', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['dst_chain'], ['sfc_port_chains.id'], ondelete='RESTRICT'), sa.ForeignKeyConstraint(['service_graph_id'], ['sfc_service_graphs.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['src_chain'], ['sfc_port_chains.id'], ondelete='RESTRICT'), sa.PrimaryKeyConstraint('service_graph_id', 'src_chain', 'dst_chain'), mysql_engine='InnoDB') networking-sfc-10.0.0/networking_sfc/db/migration/alembic_migrations/env.py0000664000175000017500000000707713656750333027172 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from alembic import context from neutron_lib.db import model_base from oslo_config import cfg import sqlalchemy as sa from sqlalchemy import event from neutron.db.migration.alembic_migrations import external from neutron.db.migration import autogen from neutron.db.migration.connection import DBConnection from networking_sfc.db.migration.models import head # noqa try: # NOTE(mriedem): This is to register the DB2 alembic code which # is an optional runtime dependency. from ibm_db_alembic.ibm_db import IbmDbImpl # noqa # pylint: disable=unused-import except ImportError: pass MYSQL_ENGINE = None SFC_VERSION_TABLE = 'alembic_version_sfc' config = context.config neutron_config = config.neutron_config target_metadata = model_base.BASEV2.metadata def set_mysql_engine(): try: mysql_engine = neutron_config.command.mysql_engine except cfg.NoSuchOptError: mysql_engine = None global MYSQL_ENGINE MYSQL_ENGINE = (mysql_engine or model_base.BASEV2.__table_args__['mysql_engine']) def include_object(object_, name, type_, reflected, compare_to): if type_ == 'table' and name in external.TABLES: return False elif type_ == 'index' and reflected and name.startswith("idx_autoinc_"): # skip indexes created by SQLAlchemy autoincrement=True # on composite PK integer columns return False return True def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with either a URL or an Engine. Calls to context.execute() here emit the given string to the script output. 
""" set_mysql_engine() kwargs = dict() if neutron_config.database.connection: kwargs['url'] = neutron_config.database.connection else: kwargs['dialect_name'] = neutron_config.database.engine kwargs['include_object'] = include_object kwargs['version_table'] = SFC_VERSION_TABLE context.configure(**kwargs) with context.begin_transaction(): context.run_migrations() @event.listens_for(sa.Table, 'after_parent_attach') def set_storage_engine(target, parent): if MYSQL_ENGINE: target.kwargs['mysql_engine'] = MYSQL_ENGINE def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ set_mysql_engine() connection = config.attributes.get('connection') with DBConnection(neutron_config.database.connection, connection) as conn: context.configure( connection=conn, target_metadata=target_metadata, version_table=SFC_VERSION_TABLE, include_object=include_object, process_revision_directives=autogen.process_revision_directives ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() networking-sfc-10.0.0/networking_sfc/db/migration/alembic_migrations/script.py.mako0000664000175000017500000000202113656750333030614 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from alembic import op import sqlalchemy as sa ${imports if imports else ""} """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} % if branch_labels: branch_labels = ${repr(branch_labels)} %endif def upgrade(): ${upgrades if upgrades else "pass"} networking-sfc-10.0.0/networking_sfc/db/sfc_db.py0000664000175000017500000012274713656750333022003 0ustar zuulzuul00000000000000# Copyright 2015 Futurewei. All rights reserved. # Copyright 2017 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy.ext.orderinglist import ordering_list from sqlalchemy import orm from sqlalchemy.orm import backref from sqlalchemy.orm.collections import attribute_mapped_collection from sqlalchemy.orm import exc from neutron.db import models_v2 from neutron_lib.db import api as db_api from neutron_lib.db import constants as db_const from neutron_lib.db import model_base from neutron_lib.db import model_query from neutron_lib.db import utils as db_utils from networking_sfc.db import flowclassifier_db as fc_db from networking_sfc.extensions import flowclassifier as ext_fc from networking_sfc.extensions import servicegraph as ext_sg from networking_sfc.extensions import sfc as ext_sfc from networking_sfc.extensions import tap as ext_tap LOG = logging.getLogger(__name__) UUID_LEN = 36 PARAM_LEN = 255 VAR_MAX_LEN = 1024 class ChainParameter(model_base.BASEV2): """Represents a single chain parameter.""" __tablename__ = 'sfc_port_chain_parameters' keyword = sa.Column(sa.String(PARAM_LEN), primary_key=True) value = sa.Column(sa.String(PARAM_LEN)) chain_id = sa.Column( sa.String(UUID_LEN), sa.ForeignKey('sfc_port_chains.id', ondelete='CASCADE'), primary_key=True) class ServiceFunctionParam(model_base.BASEV2): """Represents a service function parameter.""" __tablename__ = 'sfc_service_function_params' keyword = sa.Column(sa.String(PARAM_LEN), primary_key=True) value = sa.Column(sa.String(PARAM_LEN)) pair_id = sa.Column( sa.String(UUID_LEN), sa.ForeignKey('sfc_port_pairs.id', ondelete='CASCADE'), primary_key=True) class PortPairGroupParam(model_base.BASEV2): """Represents a port pair group parameter.""" __tablename__ = 'sfc_port_pair_group_params' keyword = sa.Column(sa.String(PARAM_LEN), primary_key=True) value = sa.Column(sa.String(VAR_MAX_LEN)) pair_group_id = sa.Column( sa.String(UUID_LEN), 
sa.ForeignKey('sfc_port_pair_groups.id', ondelete='CASCADE'), primary_key=True) class ChainClassifierAssoc(model_base.BASEV2): """Relation table between sfc_port_chains and flow_classifiers.""" __tablename__ = 'sfc_chain_classifier_associations' flowclassifier_id = sa.Column( sa.String(UUID_LEN), sa.ForeignKey('sfc_flow_classifiers.id', ondelete='RESTRICT'), primary_key=True, nullable=False, unique=True) portchain_id = sa.Column( sa.String(UUID_LEN), sa.ForeignKey('sfc_port_chains.id', ondelete='CASCADE'), primary_key=True) flow_classifier = orm.relationship( fc_db.FlowClassifier, backref=backref('chain_classifier_association', uselist=False), uselist=False ) class PortPair(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents the ingress and egress ports for a single service function. """ __tablename__ = 'sfc_port_pairs' name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) description = sa.Column(sa.String(db_const.DESCRIPTION_FIELD_SIZE)) ingress = sa.Column( sa.String(UUID_LEN), sa.ForeignKey('ports.id', ondelete='RESTRICT'), nullable=False) egress = sa.Column( sa.String(UUID_LEN), sa.ForeignKey('ports.id', ondelete='RESTRICT'), nullable=False) portpairgroup_id = sa.Column( sa.String(UUID_LEN), sa.ForeignKey('sfc_port_pair_groups.id', ondelete='RESTRICT')) service_function_parameters = orm.relationship( ServiceFunctionParam, collection_class=attribute_mapped_collection('keyword'), cascade='all, delete-orphan') __table_args__ = ( sa.UniqueConstraint( ingress, egress, name='uniq_sfc_port_pairs0ingress0egress' ), model_base.BASEV2.__table_args__ ) class ChainGroupAssoc(model_base.BASEV2): """Relation table between sfc_port_chains and sfc_port_pair_groups.""" __tablename__ = 'sfc_chain_group_associations' portpairgroup_id = sa.Column( sa.String(UUID_LEN), sa.ForeignKey('sfc_port_pair_groups.id', ondelete='RESTRICT'), primary_key=True, nullable=False) portchain_id = sa.Column( sa.String(UUID_LEN), sa.ForeignKey('sfc_port_chains.id', 
ondelete='CASCADE'), primary_key=True) position = sa.Column(sa.Integer) class PortPairGroup(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a port pair group model.""" __tablename__ = 'sfc_port_pair_groups' group_id = sa.Column(sa.Integer(), unique=True, nullable=False) name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) description = sa.Column(sa.String(db_const.DESCRIPTION_FIELD_SIZE)) tap_enabled = sa.Column(sa.Boolean(), server_default=sa.sql.false(), nullable=False) port_pairs = orm.relationship( PortPair, backref='port_pair_group' ) port_pair_group_parameters = orm.relationship( PortPairGroupParam, collection_class=attribute_mapped_collection('keyword'), cascade='all, delete-orphan') chain_group_associations = orm.relationship( ChainGroupAssoc, backref='port_pair_groups') class PortChain(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a Neutron service function Port Chain.""" __tablename__ = 'sfc_port_chains' chain_id = sa.Column(sa.Integer(), unique=True, nullable=False) name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) description = sa.Column(sa.String(db_const.DESCRIPTION_FIELD_SIZE)) chain_group_associations = orm.relationship( ChainGroupAssoc, backref='port_chain', order_by="ChainGroupAssoc.position", collection_class=ordering_list('position'), cascade='all, delete-orphan') chain_classifier_associations = orm.relationship( ChainClassifierAssoc, backref='port_chain', cascade='all, delete-orphan') chain_parameters = orm.relationship( ChainParameter, collection_class=attribute_mapped_collection('keyword'), cascade='all, delete-orphan') class GraphChainAssoc(model_base.BASEV2): """Relation table between Service Graphs and src+dst Port Chains.""" __tablename__ = 'sfc_service_graph_chain_associations' service_graph_id = sa.Column( sa.String(UUID_LEN), sa.ForeignKey('sfc_service_graphs.id', ondelete='CASCADE'), primary_key=True, nullable=False) src_chain = sa.Column( sa.String(UUID_LEN), 
sa.ForeignKey('sfc_port_chains.id', ondelete='RESTRICT'), primary_key=True, nullable=False) dst_chain = sa.Column( sa.String(UUID_LEN), sa.ForeignKey('sfc_port_chains.id', ondelete='RESTRICT'), primary_key=True, nullable=False) class ServiceGraph(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a Neutron Service Function Chain Graph.""" __tablename__ = 'sfc_service_graphs' name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) description = sa.Column(sa.String(db_const.DESCRIPTION_FIELD_SIZE)) graph_chain_associations = orm.relationship( GraphChainAssoc, backref='service_graph', cascade='all, delete-orphan') class SfcDbPlugin( ext_sfc.SfcPluginBase, ext_sg.ServiceGraphPluginBase ): """Mixin class to add port chain to db_plugin_base_v2.""" def _make_port_chain_dict(self, port_chain, fields=None): res = { 'id': port_chain['id'], 'name': port_chain['name'], 'project_id': port_chain['project_id'], 'description': port_chain['description'], 'port_pair_groups': [ assoc['portpairgroup_id'] for assoc in port_chain['chain_group_associations'] ], 'flow_classifiers': [ assoc['flowclassifier_id'] for assoc in port_chain['chain_classifier_associations'] ], 'chain_parameters': { param['keyword']: jsonutils.loads(param['value']) for k, param in port_chain['chain_parameters'].items() }, 'chain_id': port_chain['chain_id'], } return db_utils.resource_fields(res, fields) def _validate_port_pair_groups(self, context, pg_ids, pc_id=None): with db_api.CONTEXT_READER.using(context): prev_pg_tap_enabled = False for pg_id in pg_ids: pg = self._get_port_pair_group(context, pg_id) curr_pg_tap_enabled = pg['tap_enabled'] if prev_pg_tap_enabled and curr_pg_tap_enabled: raise ext_tap.ConsecutiveTapPPGNotSupported() else: prev_pg_tap_enabled = curr_pg_tap_enabled query = model_query.query_with_hooks(context, PortChain) for port_chain_db in query.all(): if port_chain_db['id'] == pc_id: continue pc_pg_ids = [ assoc['portpairgroup_id'] for assoc in 
port_chain_db.chain_group_associations ] if pc_pg_ids and pg_ids and pc_pg_ids == pg_ids: raise ext_sfc.InvalidPortPairGroups( port_pair_groups=pg_ids, port_chain=port_chain_db.id) def _validate_correlation_consistency(self, context, ppg_ids, pc_corr): # format like in ServiceFunctionParam.value to aid comparison later: pc_corr = jsonutils.dumps(pc_corr) with db_api.CONTEXT_READER.using(context): for ppg_id in ppg_ids: ppg = self._get_port_pair_group(context, ppg_id) for pp in ppg['port_pairs']: pp_corr = pp['service_function_parameters']['correlation'] if pp_corr.value != 'null' and pp_corr.value != pc_corr: raise ext_sfc.PortChainInconsistentCorrelations( ppg=ppg_id) def _validate_flow_classifiers(self, context, fc_ids, pc_id=None): with db_api.CONTEXT_READER.using(context): fcs = [ self._get_flow_classifier(context, fc_id) for fc_id in fc_ids ] for fc in fcs: fc_assoc = fc.chain_classifier_association if fc_assoc and fc_assoc['portchain_id'] != pc_id: raise ext_fc.FlowClassifierInUse(id=fc.id) query = model_query.query_with_hooks(context, PortChain) for port_chain_db in query.all(): if port_chain_db['id'] == pc_id: continue pc_fc_ids = [ assoc['flowclassifier_id'] for assoc in port_chain_db.chain_classifier_associations ] pc_fcs = [ self._get_flow_classifier(context, pc_fc_id) for pc_fc_id in pc_fc_ids ] for pc_fc in pc_fcs: for fc in fcs: fc_cls = fc_db.FlowClassifierDbPlugin if fc_cls.flowclassifier_basic_conflict( pc_fc, fc ): raise ext_sfc.PortChainFlowClassifierInConflict( fc_id=fc['id'], pc_id=port_chain_db['id'], pc_fc_id=pc_fc['id'] ) def _setup_chain_group_associations( self, context, port_chain, pg_ids ): with db_api.CONTEXT_READER.using(context): chain_group_associations = [] for pg_id in pg_ids: query = model_query.query_with_hooks(context, ChainGroupAssoc) chain_group_association = query.filter_by( portchain_id=port_chain.id, portpairgroup_id=pg_id ).first() if not chain_group_association: chain_group_association = ChainGroupAssoc( 
portpairgroup_id=pg_id ) chain_group_associations.append(chain_group_association) port_chain.chain_group_associations = chain_group_associations def _setup_chain_classifier_associations( self, context, port_chain, fc_ids ): with db_api.CONTEXT_READER.using(context): chain_classifier_associations = [] for fc_id in fc_ids: query = model_query.query_with_hooks( context, ChainClassifierAssoc) chain_classifier_association = query.filter_by( portchain_id=port_chain.id, flowclassifier_id=fc_id ).first() if not chain_classifier_association: chain_classifier_association = ChainClassifierAssoc( flowclassifier_id=fc_id ) chain_classifier_associations.append( chain_classifier_association) port_chain.chain_classifier_associations = ( chain_classifier_associations) @log_helpers.log_method_call def create_port_chain(self, context, port_chain): """Create a port chain.""" pc = port_chain['port_chain'] project_id = pc['project_id'] chain_id = pc['chain_id'] with db_api.CONTEXT_WRITER.using(context): chain_parameters = { key: ChainParameter(keyword=key, value=jsonutils.dumps(val)) for key, val in pc['chain_parameters'].items()} ppg_ids = pc['port_pair_groups'] fc_ids = pc['flow_classifiers'] self._validate_port_pair_groups(context, ppg_ids) self._validate_correlation_consistency( context, ppg_ids, pc['chain_parameters']['correlation']) self._validate_flow_classifiers(context, fc_ids) assigned_chain_ids = {} query = context.session.query(PortChain) for port_chain_db in query.all(): assigned_chain_ids[port_chain_db['chain_id']] = ( port_chain_db['id'] ) if not chain_id: available_chain_id = 1 while available_chain_id < ext_sfc.MAX_CHAIN_ID: if available_chain_id not in assigned_chain_ids: chain_id = available_chain_id break available_chain_id += 1 if not chain_id: raise ext_sfc.PortChainUnavailableChainId() else: if chain_id in assigned_chain_ids: raise ext_sfc.PortChainChainIdInConflict( chain_id=chain_id, pc_id=assigned_chain_ids[chain_id]) port_chain_db = 
PortChain(id=uuidutils.generate_uuid(), project_id=project_id, description=pc['description'], name=pc['name'], chain_parameters=chain_parameters, chain_id=chain_id) self._setup_chain_group_associations( context, port_chain_db, ppg_ids) self._setup_chain_classifier_associations( context, port_chain_db, fc_ids) context.session.add(port_chain_db) return self._make_port_chain_dict(port_chain_db) @log_helpers.log_method_call def get_port_chains(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False, default_sg=False): marker_obj = db_utils.get_marker_obj(self, context, 'port_chain', limit, marker) return model_query.get_collection( context, PortChain, self._make_port_chain_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def get_port_chains_count(self, context, filters=None): return model_query.get_collection_count( context, PortChain, filters=filters) @log_helpers.log_method_call def get_port_chain(self, context, id, fields=None): portchain = self._get_port_chain(context, id) return self._make_port_chain_dict(portchain, fields) @log_helpers.log_method_call def _get_port_chain(self, context, id): try: return model_query.get_by_id(context, PortChain, id) except exc.NoResultFound: raise ext_sfc.PortChainNotFound(id=id) @log_helpers.log_method_call def delete_port_chain(self, context, id): # if this port chain is part of a graph, abort delete: if self._any_port_chains_in_a_graph(context, {id}): raise ext_sg.ServiceGraphPortChainInUse(id=id) try: with db_api.CONTEXT_WRITER.using(context): pc = self._get_port_chain(context, id) context.session.delete(pc) except ext_sfc.PortChainNotFound: LOG.info("Deleting a non-existing port chain.") @log_helpers.log_method_call def update_port_chain(self, context, id, port_chain): pc = port_chain['port_chain'] # if this port chain is part of a graph, abort non-neutral updates: if self._any_port_chains_in_a_graph(context, {id}): if 
'flow_classifiers' in pc or 'port_pair_groups' in pc: raise ext_sg.ServiceGraphPortChainInUse(id=id) with db_api.CONTEXT_WRITER.using(context): pc_db = self._get_port_chain(context, id) for k, v in pc.items(): if k == 'flow_classifiers': self._validate_flow_classifiers( context, v, pc_id=id) self._setup_chain_classifier_associations( context, pc_db, v) elif k == 'port_pair_groups': self._validate_port_pair_groups( context, v, pc_id=id) self._setup_chain_group_associations( context, pc_db, v) else: pc_db[k] = v return self._make_port_chain_dict(pc_db) def _make_port_pair_dict(self, port_pair, fields=None): res = { 'id': port_pair['id'], 'name': port_pair['name'], 'description': port_pair['description'], 'project_id': port_pair['project_id'], 'ingress': port_pair['ingress'], 'egress': port_pair['egress'], 'service_function_parameters': { param['keyword']: jsonutils.loads(param['value']) for k, param in port_pair['service_function_parameters'].items() } } return db_utils.resource_fields(res, fields) def _validate_port_pair_ingress_egress(self, ingress, egress): if 'device_id' not in ingress or not ingress['device_id']: raise ext_sfc.PortPairIngressNoHost( ingress=ingress['id'] ) if 'device_id' not in egress or not egress['device_id']: raise ext_sfc.PortPairEgressNoHost( egress=egress['id'] ) if ingress['device_id'] != egress['device_id']: raise ext_sfc.PortPairIngressEgressDifferentHost( ingress=ingress['id'], egress=egress['id']) @log_helpers.log_method_call def create_port_pair(self, context, port_pair): """Create a port pair.""" pp = port_pair['port_pair'] project_id = pp['project_id'] with db_api.CONTEXT_WRITER.using(context): query = model_query.query_with_hooks(context, PortPair) pp_in_use = query.filter_by( ingress=pp['ingress'], egress=pp['egress'] ).first() if pp_in_use: raise ext_sfc.PortPairIngressEgressInUse( ingress=pp['ingress'], egress=pp['egress'], id=pp_in_use['id'] ) service_function_parameters = { key: ServiceFunctionParam( keyword=key, 
value=jsonutils.dumps(val)) for key, val in pp['service_function_parameters'].items() } ingress = self._get_port(context, pp['ingress']) egress = self._get_port(context, pp['egress']) self._validate_port_pair_ingress_egress(ingress, egress) port_pair_db = PortPair( id=uuidutils.generate_uuid(), name=pp['name'], description=pp['description'], project_id=project_id, ingress=pp['ingress'], egress=pp['egress'], service_function_parameters=service_function_parameters ) context.session.add(port_pair_db) return self._make_port_pair_dict(port_pair_db) @log_helpers.log_method_call def get_port_pairs(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = db_utils.get_marker_obj(self, context, 'port_pair', limit, marker) return model_query.get_collection( context, PortPair, self._make_port_pair_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def get_port_pairs_count(self, context, filters=None): return model_query.get_collection_count( context, PortPair, filters=filters) @log_helpers.log_method_call def get_port_pair(self, context, id, fields=None): port_pair = self._get_port_pair(context, id) return self._make_port_pair_dict(port_pair, fields) def _get_port_pair(self, context, id): try: return model_query.get_by_id(context, PortPair, id) except exc.NoResultFound: raise ext_sfc.PortPairNotFound(id=id) def _get_port(self, context, id): try: return model_query.get_by_id(context, models_v2.Port, id) except exc.NoResultFound: raise ext_sfc.PortPairPortNotFound(id=id) @log_helpers.log_method_call def update_port_pair(self, context, id, port_pair): new_pp = port_pair['port_pair'] with db_api.CONTEXT_WRITER.using(context): old_pp = self._get_port_pair(context, id) old_pp.update(new_pp) return self._make_port_pair_dict(old_pp) @log_helpers.log_method_call def delete_port_pair(self, context, id): try: with db_api.CONTEXT_WRITER.using(context): pp = 
self._get_port_pair(context, id) if pp.portpairgroup_id: raise ext_sfc.PortPairInUse(id=id) context.session.delete(pp) except ext_sfc.PortPairNotFound: LOG.info("Deleting a non-existing port pair.") def _make_port_pair_group_dict(self, port_pair_group, fields=None): res = { 'id': port_pair_group['id'], 'name': port_pair_group['name'], 'description': port_pair_group['description'], 'project_id': port_pair_group['project_id'], 'port_pairs': [pp['id'] for pp in port_pair_group['port_pairs']], 'tap_enabled': port_pair_group['tap_enabled'], 'port_pair_group_parameters': { param['keyword']: jsonutils.loads(param['value']) for k, param in port_pair_group['port_pair_group_parameters'].items() }, 'group_id': port_pair_group.get('group_id') or 0 } return db_utils.resource_fields(res, fields) def _validate_pps_in_ppg(self, portpairs_list, id=None): first_check = True correlation = None for portpair in portpairs_list: sfparams = portpair.service_function_parameters pp_corr = sfparams['correlation'] if first_check: first_check = False correlation = pp_corr.value if pp_corr.value != correlation: # don't include PPs of different correlations raise ext_sfc.InconsistentCorrelations() if ( portpair.portpairgroup_id and portpair.portpairgroup_id != id ): # don't include PPs included by other PPGs raise ext_sfc.PortPairInUse(id=portpair.id) @log_helpers.log_method_call def create_port_pair_group(self, context, port_pair_group): """Create a port pair group.""" pg = port_pair_group['port_pair_group'] project_id = pg['project_id'] with db_api.CONTEXT_WRITER.using(context): portpairs_list = [self._get_port_pair(context, pp_id) for pp_id in pg['port_pairs']] self._validate_pps_in_ppg(portpairs_list) if pg['tap_enabled'] and len(portpairs_list) > 1: raise ext_tap.MultiplePortPairsInTapPPGNotSupported() port_pair_group_parameters = { key: PortPairGroupParam( keyword=key, value=jsonutils.dumps(val)) for key, val in pg['port_pair_group_parameters'].items() } assigned_group_ids = {} query = 
context.session.query(PortPairGroup) for port_pair_group_db in query.all(): assigned_group_ids[port_pair_group_db['group_id']] = ( port_pair_group_db['id'] ) group_id = 0 available_group_id = 1 while True: if available_group_id not in assigned_group_ids: group_id = available_group_id break available_group_id += 1 port_pair_group_db = PortPairGroup( id=uuidutils.generate_uuid(), name=pg['name'], description=pg['description'], project_id=project_id, port_pairs=portpairs_list, port_pair_group_parameters=port_pair_group_parameters, group_id=group_id, tap_enabled=pg['tap_enabled'] ) context.session.add(port_pair_group_db) return self._make_port_pair_group_dict(port_pair_group_db) @log_helpers.log_method_call def get_port_pair_groups(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = db_utils.get_marker_obj(self, context, 'port_pair_group', limit, marker) return model_query.get_collection( context, PortPairGroup, self._make_port_pair_group_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def get_port_pair_groups_count(self, context, filters=None): return model_query.get_collection_count( context, PortPairGroup, filters=filters) @log_helpers.log_method_call def get_port_pair_group(self, context, id, fields=None): port_pair_group = self._get_port_pair_group(context, id) return self._make_port_pair_group_dict(port_pair_group, fields) def _get_port_pair_group(self, context, id): try: return model_query.get_by_id(context, PortPairGroup, id) except exc.NoResultFound: raise ext_sfc.PortPairGroupNotFound(id=id) def _get_flow_classifier(self, context, id): try: return model_query.get_by_id(context, fc_db.FlowClassifier, id) except exc.NoResultFound: raise ext_fc.FlowClassifierNotFound(id=id) @log_helpers.log_method_call def update_port_pair_group(self, context, id, port_pair_group): new_pg = port_pair_group['port_pair_group'] with 
db_api.CONTEXT_WRITER.using(context): portpairs_list = [self._get_port_pair(context, pp_id) for pp_id in new_pg.get('port_pairs', [])] self._validate_pps_in_ppg(portpairs_list, id) old_pg = self._get_port_pair_group(context, id) if old_pg['tap_enabled'] and len(portpairs_list) > 1: raise ext_tap.MultiplePortPairsInTapPPGNotSupported() for k, v in new_pg.items(): if k == 'port_pairs': port_pairs = [ self._get_port_pair(context, pp_id) for pp_id in v ] old_pg.port_pairs = port_pairs else: old_pg[k] = v return self._make_port_pair_group_dict(old_pg) @log_helpers.log_method_call def delete_port_pair_group(self, context, id): try: with db_api.CONTEXT_WRITER.using(context): pg = self._get_port_pair_group(context, id) if pg.chain_group_associations: raise ext_sfc.PortPairGroupInUse(id=id) context.session.delete(pg) except ext_sfc.PortPairGroupNotFound: LOG.info("Deleting a non-existing port pair group.") def _graph_assocs_to_pc_dict(self, assocs): assoc_dict = {} for assoc in assocs: if assoc.src_chain in assoc_dict: assoc_dict[assoc.src_chain].append(assoc.dst_chain) else: assoc_dict[assoc.src_chain] = [assoc.dst_chain] return assoc_dict def _make_service_graph_dict(self, graph_db, fields=None): res = { 'id': graph_db['id'], 'name': graph_db['name'], 'project_id': graph_db['project_id'], 'description': graph_db['description'], 'port_chains': self._graph_assocs_to_pc_dict( graph_db['graph_chain_associations']) } return db_utils.resource_fields(res, fields) def _make_graph_chain_assoc_dict(self, assoc_db, fields=None): res = { 'service_graph_id': assoc_db['service_graph_id'], 'src_chain': assoc_db['src_chain'], 'dst_chain': assoc_db['dst_chain'] } return db_utils.resource_fields(res, fields) def _is_there_a_loop(self, parenthood, current_chain, traversed): if current_chain not in parenthood: return False if current_chain not in traversed: loop_status = False traversed.append(current_chain) for port_chain in parenthood[current_chain]: loop_status = loop_status or 
self._is_there_a_loop( parenthood, port_chain, list(traversed)) return loop_status return True def _any_port_chains_in_a_graph(self, context, port_chains=set(), graph_id=None): if not port_chains: return False with db_api.CONTEXT_READER.using(context): query = model_query.query_with_hooks(context, ServiceGraph) for graph_db in query.all(): if graph_db['id'] == graph_id: continue pc_ids = [ assoc['src_chain'] for assoc in graph_db.graph_chain_associations ] pc_ids.extend([ assoc['dst_chain'] for assoc in graph_db.graph_chain_associations ]) if pc_ids and port_chains and set( pc_ids).intersection(port_chains): return True return False def _validate_port_chains_for_graph(self, context, port_chains, graph_id=None): # create a list of all port-chains that will be associated all_port_chains = set() for src_chain in port_chains: all_port_chains.add(src_chain) for dst_chain in port_chains[src_chain]: all_port_chains.add(dst_chain) # check if any of the port-chains are already in a graph if self._any_port_chains_in_a_graph( context, all_port_chains, graph_id): raise ext_sg.ServiceGraphInvalidPortChains( port_chains=port_chains) # dict whose keys are PCs and values are lists of dependency-PCs # (PCs incoming to the point where the key is a outgoing) parenthood = {} encapsulation = None fc_cls = fc_db.FlowClassifierDbPlugin for src_chain in port_chains: src_pc = self._get_port_chain(context, src_chain) curr_corr = src_pc.chain_parameters['correlation']['value'] # guarantee that branching PPG supports correlation assocs = src_pc.chain_group_associations src_ppg = max(assocs, key=(lambda ppg: ppg.position)) ppg_id = src_ppg['portpairgroup_id'] ppg = self._get_port_pair_group(context, ppg_id) for pp in ppg.port_pairs: sfparams = pp['service_function_parameters'] if sfparams['correlation']['value'] != curr_corr: raise ext_sg.ServiceGraphImpossibleBranching() # verify encapsulation consistency across all PCs (part 1) if not encapsulation: encapsulation = curr_corr elif 
encapsulation != curr_corr: raise ext_sg.ServiceGraphInconsistentEncapsulation() # list of all port chains at this branching point: branching_point = [] # list of every flow classifier at this branching point: fcs_for_src_chain = [] for dst_chain in port_chains[src_chain]: # check if the current destination PC was already added if dst_chain in branching_point: raise ext_sg.ServiceGraphPortChainInConflict( pc_id=dst_chain) branching_point.append(dst_chain) dst_pc = self._get_port_chain(context, dst_chain) curr_corr = dst_pc.chain_parameters['correlation']['value'] # guarantee that destination PPG supports correlation assocs = dst_pc.chain_group_associations dst_ppg = min(assocs, key=(lambda ppg: ppg.position)) ppg_id = dst_ppg['portpairgroup_id'] ppg = self._get_port_pair_group(context, ppg_id) for pp in ppg.port_pairs: sfparams = pp['service_function_parameters'] if sfparams['correlation']['value'] != curr_corr: raise ext_sg.ServiceGraphImpossibleBranching() # verify encapsulation consistency across all PCs (part 2) if encapsulation != curr_corr: raise ext_sg.ServiceGraphInconsistentEncapsulation() dst_pc_dict = self._make_port_chain_dict(dst_pc) # acquire associated flow classifiers fcs = dst_pc_dict['flow_classifiers'] for fc_id in fcs: fc = self._get_flow_classifier(context, fc_id) fcs_for_src_chain.append(fc) # update list of every FC # update the parenthood dict if dst_chain in parenthood: parenthood[dst_chain].append(src_chain) else: parenthood[dst_chain] = [src_chain] # detect duplicate FCs, consequently branching ambiguity for i, fc1 in enumerate(fcs_for_src_chain): for fc2 in fcs_for_src_chain[i + 1:]: if(fc_cls.flowclassifier_basic_conflict(fc1, fc2)): raise ext_sg.\ ServiceGraphFlowClassifierInConflict( fc1_id=fc1['id'], fc2_id=fc2['id']) # check for circular paths within the graph via parenthood dict: for port_chain in parenthood: if self._is_there_a_loop(parenthood, port_chain, []): raise ext_sg.ServiceGraphLoopDetected() def 
_setup_graph_chain_associations(self, context, graph_db, port_chains): with db_api.CONTEXT_READER.using(context): graph_chain_associations = [] for src_chain in port_chains: query = model_query.query_with_hooks(context, GraphChainAssoc) for dst_chain in port_chains[src_chain]: graph_chain_association = query.filter_by( service_graph_id=graph_db.id, src_chain=src_chain, dst_chain=dst_chain).first() if not graph_chain_association: graph_chain_association = GraphChainAssoc( service_graph_id=graph_db.id, src_chain=src_chain, dst_chain=dst_chain ) graph_chain_associations.append(graph_chain_association) graph_db.graph_chain_associations = graph_chain_associations def _get_branches(self, context, filters): return model_query.get_collection( context, GraphChainAssoc, self._make_graph_chain_assoc_dict, filters=filters) @log_helpers.log_method_call def create_service_graph(self, context, service_graph): """Create a Service Graph.""" service_graph = service_graph['service_graph'] project_id = service_graph['project_id'] with db_api.CONTEXT_WRITER.using(context): port_chains = service_graph['port_chains'] self._validate_port_chains_for_graph(context, port_chains) graph_db = ServiceGraph(id=uuidutils.generate_uuid(), project_id=project_id, description=service_graph['description'], name=service_graph['name']) self._setup_graph_chain_associations( context, graph_db, port_chains) context.session.add(graph_db) return self._make_service_graph_dict(graph_db) @log_helpers.log_method_call def get_service_graphs(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Get Service Graphs.""" marker_obj = db_utils.get_marker_obj(self, context, 'service_graph', limit, marker) return model_query.get_collection( context, ServiceGraph, self._make_service_graph_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) @log_helpers.log_method_call def get_service_graph(self, context, id, 
fields=None): """Get a Service Graph.""" service_graph = self._get_service_graph(context, id) return self._make_service_graph_dict(service_graph, fields) @log_helpers.log_method_call def _get_service_graph(self, context, id): try: return model_query.get_by_id(context, ServiceGraph, id) except exc.NoResultFound: raise ext_sg.ServiceGraphNotFound(id=id) @log_helpers.log_method_call def update_service_graph(self, context, id, service_graph): """Update a Service Graph.""" service_graph = service_graph['service_graph'] with db_api.CONTEXT_WRITER.using(context): graph_db = self._get_service_graph(context, id) for k, v in service_graph.items(): if k == 'port_chains': self._validate_port_chains_for_graph( context, v, graph_id=id) self._setup_graph_chain_associations( context, graph_db, v) else: graph_db[k] = v return self._make_service_graph_dict(graph_db) @log_helpers.log_method_call def delete_service_graph(self, context, id): """Delete a Service Graph.""" try: with db_api.CONTEXT_WRITER.using(context): graph = self._get_service_graph(context, id) context.session.delete(graph) except ext_sfc.ServiceGraphNotFound: LOG.info("Deleting a non-existing Service Graph.") networking-sfc-10.0.0/networking_sfc/_i18n.py0000664000175000017500000000227013656750333021100 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/usage.html . 
""" import oslo_i18n DOMAIN = "networking_sfc" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" # requires oslo.i18n >=2.1.0 _C = _translators.contextual_form # The plural translation function using the name "_P" # requires oslo.i18n >=2.1.0 _P = _translators.plural_form def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) networking-sfc-10.0.0/test-requirements.txt0000664000175000017500000000130113656750333021021 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD flake8-import-order==0.12 # LGPLv3 mock>=2.0.0 # BSD requests-mock>=1.2.0 # Apache-2.0 testresources>=2.0.0 # Apache-2.0/BSD testtools>=2.2.0 # MIT testscenarios>=0.4 # Apache-2.0/BSD WebOb>=1.7.1 # MIT WebTest>=2.0.27 # MIT oslotest>=3.2.0 # Apache-2.0 stestr>=2.0.0 # Apache-2.0 astroid==1.6.5 # LGPLv2.1 pylint==1.9.2 # GPLv2 psycopg2>=2.7.7 # LGPL/ZPL PyMySQL>=0.7.6 # MIT License networking-sfc-10.0.0/lower-constraints.txt0000664000175000017500000000523213656750333021025 0ustar zuulzuul00000000000000alabaster==0.7.10 alembic==0.8.10 amqp==2.2.2 appdirs==1.4.3 asn1crypto==0.24.0 astroid==1.6.5 Babel==2.5.3 bcrypt==3.1.4 beautifulsoup4==4.6.0 cachetools==2.0.1 certifi==2018.1.18 cffi==1.11.5 chardet==3.0.4 cliff==2.11.0 cmd2==0.8.1 contextlib2==0.5.5 coverage==4.0 cryptography==2.1.4 debtcollector==1.19.0 decorator==4.2.1 deprecation==2.0 docutils==0.14 dogpile.cache==0.6.5 dulwich==0.19.0 enum-compat==0.0.2 eventlet==0.18.2 extras==1.0.0 fasteners==0.14.1 fixtures==3.0.0 flake8-import-order==0.12 flake8==2.5.5 future==0.16.0 
futurist==1.6.0 greenlet==0.4.13 hacking==0.12.0 httplib2==0.10.3 idna==2.6 imagesize==1.0.0 iso8601==0.1.12 Jinja2==2.10 jmespath==0.9.3 jsonpatch==1.21 jsonpointer==2.0 jsonschema==2.6.0 keystoneauth1==3.4.0 keystonemiddleware==4.21.0 kombu==4.1.0 linecache2==1.0.0 logilab-common==1.4.1 logutils==0.3.5 Mako==1.0.7 MarkupSafe==1.0 mccabe==0.2.1 mock==2.0.0 monotonic==1.4 mox3==0.25.0 msgpack-python==0.5.6 msgpack==0.5.6 munch==2.2.0 netaddr==0.7.18 netifaces==0.10.6 neutron-lib==1.18.0 openstacksdk==0.12.0 os-client-config==1.29.0 os-service-types==1.2.0 os-xenapi==0.3.1 osc-lib==1.10.0 oslo.cache==1.29.0 oslo.concurrency==3.26.0 oslo.config==5.2.0 oslo.context==2.20.0 oslo.db==4.35.0 oslo.i18n==3.15.3 oslo.log==3.36.0 oslo.messaging==5.29.0 oslo.middleware==3.35.0 oslo.policy==1.34.0 oslo.privsep==1.28.0 oslo.reports==1.27.0 oslo.rootwrap==5.13.0 oslo.serialization==2.18.0 oslo.service==1.30.0 oslo.utils==3.33.0 oslo.versionedobjects==1.32.0 oslotest==3.2.0 osprofiler==2.0.0 ovs==2.8.1 ovsdbapp==0.10.0 packaging==17.1 paramiko==2.4.1 Paste==2.0.3 PasteDeploy==1.5.2 pbr==2.0.0 pecan==1.2.1 pep8==1.5.7 pika-pool==0.1.3 pika==0.10.0 prettytable==0.7.2 psutil==5.4.3 psycopg2==2.7.7 pyasn1==0.4.2 pycadf==2.7.0 pycodestyle==2.3.1 pycparser==2.18 pyflakes==0.8.1 Pygments==2.2.0 pyinotify==0.9.6 pylint==1.9.2 PyMySQL==0.7.6 PyNaCl==1.2.1 pyparsing==2.2.0 pyperclip==1.6.0 pyroute2==0.4.21 python-dateutil==2.7.0 python-designateclient==2.9.0 python-editor==1.0.3 python-keystoneclient==3.15.0 python-mimeparse==1.6.0 python-neutronclient==6.7.0 python-novaclient==10.1.0 python-subunit==1.2.0 pytz==2018.3 PyYAML==3.12 repoze.lru==0.7 requests-mock==1.2.0 requests==2.18.4 requestsexceptions==1.4.0 rfc3986==1.1.0 Routes==2.4.1 ryu==4.23 simplejson==3.13.2 six==1.10.0 snowballstemmer==1.2.1 sqlalchemy-migrate==0.11.0 SQLAlchemy==1.2.0 sqlparse==0.2.4 statsd==3.2.2 stestr==2.0.0 stevedore==1.20.0 Tempita==0.5.2 tenacity==4.9.0 testrepository==0.0.20 testresources==2.0.0 
testscenarios==0.4 testtools==2.2.0 tinyrpc==0.8 traceback2==1.4.0 unittest2==1.1.0 urllib3==1.22 vine==1.1.4 voluptuous==0.11.1 waitress==1.1.0 WebOb==1.7.1 WebTest==2.0.27 wrapt==1.10.11 networking-sfc-10.0.0/AUTHORS0000664000175000017500000000726713656750461015653 0ustar zuulzuul0000000000000098k <18552437190@163.com> Aaron Rosen Adit Sarfaty Akihiro Motoki Akihiro Motoki Andrea Frittoli Andreas Jaeger Armando Migliaccio Bernard Cafarelli Boden R Brian Haley Cao Xuan Hoang Cathy Hong Zhang Ching Sun Ching Sun Claudiu Belu Cong Phuoc Hoang Corey Bryant Damian Szeluga Dao Cong Tien Dariusz Smigiel Doug Hellmann Doug Wiegley Farhad Sunavala Flavio Percoco Freya Dian Yu Gary Kotton Ghanshyam Mann Graham Hayes Ha Van Tu Haim Daniel Henry Gessau Henry Gessau Ian Wienand Igor Duarte Cardoso Ihar Hrachyshka Isaku Yamahata Jakub Libosvar James E. Blair James Page Kyle Mestery Le Hou Li-zhigang LiuYong Louis Fourie Manuel Buil Michel Peterson Mohankumar Na Nate Johnston Nguyen Phuong An Ondřej Nový OpenStack Release Bot Paul Carver Pavel Gluschak Pavel Glushchak Russell Bryant Ryan Moats Sean McGinnis Slawek Kaplonski SongmingYan Thomas Morin Thomas Morin Trinh Nguyen Tuan Do Anh Vieri <15050873171@163.com> Vikash082 Vu Cong Tuan YAMAMOTO Takashi YI-JIE,SYU Yushiro FURUKAWA Zhao Lei armando-migliaccio bhargavaregalla cathy chenyaguang chingsun dharmendra elajkat gengchc2 ghanshyam gong yong sheng guotao.bj huang.zhiping lfourie melissaml mohankumar_n pengyuesheng qinchunhua raofei reedip venkatamahesh vikram.choudhary wangfaxin wangqi xiaodongwang991481 xu-haiwei xurong00037997 yong sheng gong zhang.lei zhangyanxian networking-sfc-10.0.0/babel.cfg0000664000175000017500000000002113656750333016304 0ustar zuulzuul00000000000000[python: **.py] networking-sfc-10.0.0/CONTRIBUTING.rst0000664000175000017500000000121013656750333017220 0ustar zuulzuul00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: 
https://docs.openstack.org/infra/manual/developers.html If you already have a good understanding of how the system works and your OpenStack accounts are set up, you can skip to the development workflow section of this documentation to learn how changes to OpenStack should be submitted for review via the Gerrit tool: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/networking-sfc networking-sfc-10.0.0/networking_sfc.egg-info/0000775000175000017500000000000013656750461021303 5ustar zuulzuul00000000000000networking-sfc-10.0.0/networking_sfc.egg-info/entry_points.txt0000664000175000017500000000252013656750461024600 0ustar zuulzuul00000000000000[networking_sfc.flowclassifier.drivers] dummy = networking_sfc.services.flowclassifier.drivers.dummy.dummy:DummyDriver ovs = networking_sfc.services.flowclassifier.drivers.ovs.driver:OVSFlowClassifierDriver [networking_sfc.sfc.agent_drivers] ovs = networking_sfc.services.sfc.agent.extensions.openvswitch.sfc_driver:SfcOVSAgentDriver [networking_sfc.sfc.drivers] dummy = networking_sfc.services.sfc.drivers.dummy.dummy:DummyDriver ovs = networking_sfc.services.sfc.drivers.ovs.driver:OVSSfcDriver [neutron.agent.l2.extensions] sfc = networking_sfc.services.sfc.agent.extensions.sfc:SfcAgentExtension [neutron.db.alembic_migrations] networking-sfc = networking_sfc.db.migration:alembic_migrations [neutron.policies] networking-sfc = networking_sfc.policies:list_rules [neutron.service_plugins] flow_classifier = networking_sfc.services.flowclassifier.plugin:FlowClassifierPlugin sfc = networking_sfc.services.sfc.plugin:SfcPlugin [neutronclient.extension] flow_classifier = networking_sfc.cli.flow_classifier port_chain = networking_sfc.cli.port_chain port_pair = networking_sfc.cli.port_pair port_pair_group = networking_sfc.cli.port_pair_group [oslo.config.opts] networking-sfc = 
networking_sfc.opts:list_sfc_opts networking-sfc.quotas = networking_sfc.opts:list_quota_opts [oslo.policy.policies] networking-sfc = networking_sfc.policies:list_rules networking-sfc-10.0.0/networking_sfc.egg-info/dependency_links.txt0000664000175000017500000000000113656750461025351 0ustar zuulzuul00000000000000 networking-sfc-10.0.0/networking_sfc.egg-info/requires.txt0000664000175000017500000000051613656750461023705 0ustar zuulzuul00000000000000pbr!=2.1.0,>=2.0.0 eventlet!=0.18.3,!=0.20.1,>=0.18.2 netaddr>=0.7.18 python-neutronclient>=6.7.0 SQLAlchemy>=1.2.0 alembic>=0.8.10 six>=1.10.0 stevedore>=1.20.0 oslo.config>=5.2.0 oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.messaging>=5.29.0 oslo.serialization!=2.19.1,>=2.18.0 oslo.utils>=3.33.0 neutron-lib>=1.18.0 neutron>=13.0.0.0b2 networking-sfc-10.0.0/networking_sfc.egg-info/pbr.json0000664000175000017500000000005613656750461022762 0ustar zuulzuul00000000000000{"git_version": "07f1759", "is_release": true}networking-sfc-10.0.0/networking_sfc.egg-info/PKG-INFO0000664000175000017500000001354413656750461022407 0ustar zuulzuul00000000000000Metadata-Version: 1.2 Name: networking-sfc Version: 10.0.0 Summary: APIs and implementations to support Service Function Chaining in Neutron. Home-page: https://docs.openstack.org/networking-sfc/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ============================================================ Service Function Chaining Extension for OpenStack Networking ============================================================ Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/networking-sfc.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on Service Function Chaining API ----------------------------- This project provides APIs and implementations to support Service Function Chaining in Neutron. 
Service Function Chaining is a mechanism for overriding the basic destination based forwarding that is typical of IP networks. It is conceptually related to Policy Based Routing in physical networks but it is typically thought of as a Software Defined Networking technology. It is often used in conjunction with security functions although it may be used for a broader range of features. Fundamentally SFC is the ability to cause network packet flows to route through a network via a path other than the one that would be chosen by routing table lookups on the packet's destination IP address. It is most commonly used in conjunction with Network Function Virtualization when recreating in a virtual environment a series of network functions that would have traditionally been implemented as a collection of physical network devices connected in series by cables. A very simple example of a service chain would be one that forces all traffic from point A to point B to go through a firewall even though the firewall is not literally between point A and B from a routing table perspective. A more complex example is an ordered series of functions, each implemented in multiple VMs, such that traffic must flow through one VM at each hop in the chain but the network uses a hashing algorithm to distribute different flows across multiple VMs at each hop. This is an initial release, feedback is requested from users and the API may evolve based on that feedback. 
* Free software: Apache license * Source: https://opendev.org/openstack/networking-sfc * Documentation: https://docs.openstack.org/networking-sfc/latest * Overview: https://launchpad.net/networking-sfc * Bugs: https://bugs.launchpad.net/networking-sfc * Blueprints: https://blueprints.launchpad.net/networking-sfc * Wiki: https://wiki.openstack.org/wiki/Neutron/ServiceInsertionAndChaining * Release notes: https://docs.openstack.org/releasenotes/networking-sfc/ Features -------- * Creation of Service Function Chains consisting of an ordered sequence of Service Functions. SFs are virtual machines (or potentially physical devices) that perform a network function such as firewall, content cache, packet inspection, or any other function that requires processing of packets in a flow from point A to point B. * Reference implementation with Open vSwitch * Flow classification mechanism (ability to select and act on traffic) * Vendor neutral API * Modular plugin driver architecture Service Function Chaining Key Contributors ------------------------------------------ * Cathy Zhang (Project Lead): https://launchpad.net/~cathy-h-zhang * Louis Fourie: https://launchpad.net/~lfourie * Paul Carver: https://launchpad.net/~pcarver * Vikram: https://launchpad.net/~vikschw * Mohankumar: https://blueprints.launchpad.net/~mohankumar-n * Rao Fei: https://launchpad.net/~milo-frao * Xiaodong Wang: https://launchpad.net/~xiaodongwang991481 * Ramanjaneya Reddy Palleti: https://launchpad.net/~ramanjieee * Stephen Wong: https://launchpad.net/~s3wong * Igor Duarte Cardoso: https://launchpad.net/~igordcard * Prithiv: https://launchpad.net/~prithiv * Akihiro Motoki: https://launchpad.net/~amotoki * Swaminathan Vasudevan: https://launchpad.net/~swaminathan-vasudevan * Armando Migliaccio https://launchpad.net/~armando-migliaccio * Kyle Mestery https://launchpad.net/~mestery Background on the Subject of Service Function Chaining ------------------------------------------------------ * Original Neutron 
bug (request for enhancement): https://bugs.launchpad.net/neutron/+bug/1450617 * https://blueprints.launchpad.net/neutron/+spec/neutron-api-extension-for-service-chaining * https://blueprints.launchpad.net/neutron/+spec/common-service-chaining-driver-api * https://wiki.opnfv.org/display/VFG/Openstack+Based+VNF+Forwarding+Graph Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Requires-Python: >=3.6 networking-sfc-10.0.0/networking_sfc.egg-info/top_level.txt0000664000175000017500000000001713656750461024033 0ustar zuulzuul00000000000000networking_sfc networking-sfc-10.0.0/networking_sfc.egg-info/not-zip-safe0000664000175000017500000000000113656750461023531 0ustar zuulzuul00000000000000 networking-sfc-10.0.0/networking_sfc.egg-info/SOURCES.txt0000664000175000017500000003023113656750461023166 0ustar zuulzuul00000000000000.coveragerc .pylintrc .stestr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MANIFEST.in README.rst babel.cfg bindep.txt lower-constraints.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/conf.py api-ref/source/index.rst api-ref/source/parameters.yaml api-ref/source/sfc-chains.inc api-ref/source/sfc-classifiers.inc api-ref/source/sfc-port-pair-groups.inc api-ref/source/sfc-port-pairs.inc devstack/README.md devstack/plugin.sh devstack/settings doc/requirements.txt doc/api_samples/sfc-chains/port-chain-create-req.json doc/api_samples/sfc-chains/port-chain-create-resp.json 
doc/api_samples/sfc-chains/port-chain-get-resp.json doc/api_samples/sfc-chains/port-chain-list-resp.json doc/api_samples/sfc-chains/port-chain-update-req.json doc/api_samples/sfc-chains/port-chain-update-resp.json doc/api_samples/sfc-classifiers/flow-classifier-create-req.json doc/api_samples/sfc-classifiers/flow-classifier-create-resp.json doc/api_samples/sfc-classifiers/flow-classifier-get-resp.json doc/api_samples/sfc-classifiers/flow-classifier-list-resp.json doc/api_samples/sfc-classifiers/flow-classifier-update-req.json doc/api_samples/sfc-classifiers/flow-classifier-update-resp.json doc/api_samples/sfc-port-pair-groups/port-pair-group-create-req.json doc/api_samples/sfc-port-pair-groups/port-pair-group-create-resp.json doc/api_samples/sfc-port-pair-groups/port-pair-group-get-resp.json doc/api_samples/sfc-port-pair-groups/port-pair-group-list-resp.json doc/api_samples/sfc-port-pair-groups/port-pair-group-update-req.json doc/api_samples/sfc-port-pair-groups/port-pair-group-update-resp.json doc/api_samples/sfc-port-pairs/port-pair-create-req.json doc/api_samples/sfc-port-pairs/port-pair-create-resp.json doc/api_samples/sfc-port-pairs/port-pair-get-resp.json doc/api_samples/sfc-port-pairs/port-pair-list-resp.json doc/api_samples/sfc-port-pairs/port-pair-update-req.json doc/api_samples/sfc-port-pairs/port-pair-update-resp.json doc/api_samples/sfc-service-graphs/service-graph-create-req.json doc/api_samples/sfc-service-graphs/service-graph-create-resp.json doc/api_samples/sfc-service-graphs/service-graph-get-resp.json doc/api_samples/sfc-service-graphs/service-graph-list-resp.json doc/api_samples/sfc-service-graphs/service-graph-update-req.json doc/api_samples/sfc-service-graphs/service-graph-update-resp.json doc/source/conf.py doc/source/index.rst doc/source/readme.rst doc/source/_static/.placeholder doc/source/configuration/index.rst doc/source/configuration/networking-sfc.rst doc/source/configuration/policy-sample.rst doc/source/configuration/policy.rst 
doc/source/configuration/samples/networking-sfc.rst doc/source/contributor/alembic_migration.rst doc/source/contributor/api.rst doc/source/contributor/contribution.rst doc/source/contributor/ietf_sfc_encapsulation.rst doc/source/contributor/index.rst doc/source/contributor/ovs_driver_and_agent_workflow.rst doc/source/contributor/ovs_symmetric_port_chain.rst doc/source/contributor/sfc_non_transparent_sf.rst doc/source/contributor/sfc_ovn_driver.rst doc/source/contributor/sfc_port_chain_tap.rst doc/source/contributor/sfc_proxy_port_correlation.rst doc/source/contributor/system_design_and_workflow.rst doc/source/install/configuration.rst doc/source/install/index.rst doc/source/install/install.rst doc/source/user/command_extensions.rst doc/source/user/index.rst doc/source/user/usage.rst etc/README.txt etc/oslo-config-generator/networking-sfc.conf etc/oslo-policy-generator/policy.conf networking_sfc/__init__.py networking_sfc/_i18n.py networking_sfc/opts.py networking_sfc/version.py networking_sfc.egg-info/PKG-INFO networking_sfc.egg-info/SOURCES.txt networking_sfc.egg-info/dependency_links.txt networking_sfc.egg-info/entry_points.txt networking_sfc.egg-info/not-zip-safe networking_sfc.egg-info/pbr.json networking_sfc.egg-info/requires.txt networking_sfc.egg-info/top_level.txt networking_sfc/cli/__init__.py networking_sfc/cli/flow_classifier.py networking_sfc/cli/port_chain.py networking_sfc/cli/port_pair.py networking_sfc/cli/port_pair_group.py networking_sfc/db/__init__.py networking_sfc/db/flowclassifier_db.py networking_sfc/db/sfc_db.py networking_sfc/db/migration/README networking_sfc/db/migration/__init__.py networking_sfc/db/migration/alembic_migrations/__init__.py networking_sfc/db/migration/alembic_migrations/env.py networking_sfc/db/migration/alembic_migrations/script.py.mako networking_sfc/db/migration/alembic_migrations/versions/CONTRACT_HEAD networking_sfc/db/migration/alembic_migrations/versions/EXPAND_HEAD 
networking_sfc/db/migration/alembic_migrations/versions/start_networking_sfc.py networking_sfc/db/migration/alembic_migrations/versions/mitaka/contract/48072cb59133_initial.py networking_sfc/db/migration/alembic_migrations/versions/mitaka/expand/24fc7241aa5_initial.py networking_sfc/db/migration/alembic_migrations/versions/mitaka/expand/5a475fc853e6_ovs_data_model.py networking_sfc/db/migration/alembic_migrations/versions/mitaka/expand/9768e6a66c9_flowclassifier_data_model.py networking_sfc/db/migration/alembic_migrations/versions/mitaka/expand/c3e178d4a985_sfc_data_model.py networking_sfc/db/migration/alembic_migrations/versions/mitaka/expand/d1002a1f97f6_update_flow_classifier.py networking_sfc/db/migration/alembic_migrations/versions/mitaka/expand/fa75d46a7f11_add_port_pair_group_params.py networking_sfc/db/migration/alembic_migrations/versions/newton/contract/010308b06b49_rename_tenant_to_project.py networking_sfc/db/migration/alembic_migrations/versions/newton/contract/06382790fb2c_fix_foreign_constraints.py networking_sfc/db/migration/alembic_migrations/versions/ocata/expand/6185f1633a3d_add_correlation_as_pp_detail.py networking_sfc/db/migration/alembic_migrations/versions/ocata/expand/b3adaf631bab__add_fwd_path_and_in_mac_column.py networking_sfc/db/migration/alembic_migrations/versions/pike/expand/61832141fb82_add_ppg_n_tuple_mapping_column.py networking_sfc/db/migration/alembic_migrations/versions/pike/expand/8329e9be2d8a_modify_value_column_size_in_port_pair_.py networking_sfc/db/migration/alembic_migrations/versions/queens/expand/53ed5bec6cff_add_service_graph_api_resource.py networking_sfc/db/migration/alembic_migrations/versions/queens/expand/a3ad63aa834f_extra_attributes_for_pathnode.py networking_sfc/db/migration/alembic_migrations/versions/queens/expand/d6fb381b65f2_tap_enabled_attribute_port_pair_group.py networking_sfc/db/migration/models/__init__.py networking_sfc/db/migration/models/head.py networking_sfc/extensions/__init__.py 
networking_sfc/extensions/flowclassifier.py networking_sfc/extensions/servicegraph.py networking_sfc/extensions/sfc.py networking_sfc/extensions/tap.py networking_sfc/policies/__init__.py networking_sfc/policies/base.py networking_sfc/policies/flow_classifier.py networking_sfc/policies/port_chain.py networking_sfc/policies/port_pair.py networking_sfc/policies/port_pair_group.py networking_sfc/policies/service_graph.py networking_sfc/services/__init__.py networking_sfc/services/flowclassifier/__init__.py networking_sfc/services/flowclassifier/driver_manager.py networking_sfc/services/flowclassifier/plugin.py networking_sfc/services/flowclassifier/common/__init__.py networking_sfc/services/flowclassifier/common/config.py networking_sfc/services/flowclassifier/common/context.py networking_sfc/services/flowclassifier/common/exceptions.py networking_sfc/services/flowclassifier/drivers/__init__.py networking_sfc/services/flowclassifier/drivers/base.py networking_sfc/services/flowclassifier/drivers/dummy/__init__.py networking_sfc/services/flowclassifier/drivers/dummy/dummy.py networking_sfc/services/flowclassifier/drivers/ovs/__init__.py networking_sfc/services/flowclassifier/drivers/ovs/driver.py networking_sfc/services/sfc/__init__.py networking_sfc/services/sfc/driver_manager.py networking_sfc/services/sfc/plugin.py networking_sfc/services/sfc/agent/__init__.py networking_sfc/services/sfc/agent/extensions/__init__.py networking_sfc/services/sfc/agent/extensions/sfc.py networking_sfc/services/sfc/agent/extensions/openvswitch/__init__.py networking_sfc/services/sfc/agent/extensions/openvswitch/sfc_driver.py networking_sfc/services/sfc/common/__init__.py networking_sfc/services/sfc/common/config.py networking_sfc/services/sfc/common/context.py networking_sfc/services/sfc/common/exceptions.py networking_sfc/services/sfc/common/ovs_ext_lib.py networking_sfc/services/sfc/drivers/__init__.py networking_sfc/services/sfc/drivers/base.py 
networking_sfc/services/sfc/drivers/dummy/__init__.py networking_sfc/services/sfc/drivers/dummy/dummy.py networking_sfc/services/sfc/drivers/ovs/__init__.py networking_sfc/services/sfc/drivers/ovs/constants.py networking_sfc/services/sfc/drivers/ovs/db.py networking_sfc/services/sfc/drivers/ovs/driver.py networking_sfc/services/sfc/drivers/ovs/rpc.py networking_sfc/services/sfc/drivers/ovs/rpc_topics.py networking_sfc/tests/__init__.py networking_sfc/tests/base.py networking_sfc/tests/functional/__init__.py networking_sfc/tests/functional/test_service.py networking_sfc/tests/functional/db/__init__.py networking_sfc/tests/functional/db/test_migrations.py networking_sfc/tests/functional/db/test_models.py networking_sfc/tests/functional/services/__init__.py networking_sfc/tests/functional/services/sfc/__init__.py networking_sfc/tests/functional/services/sfc/agent/__init__.py networking_sfc/tests/functional/services/sfc/agent/extensions/__init__.py networking_sfc/tests/functional/services/sfc/agent/extensions/test_ovs_agent_sfc_extension.py networking_sfc/tests/unit/__init__.py networking_sfc/tests/unit/cli/__init__.py networking_sfc/tests/unit/cli/test_flow_classifier.py networking_sfc/tests/unit/cli/test_port_chain.py networking_sfc/tests/unit/cli/test_port_pair.py networking_sfc/tests/unit/cli/test_port_pair_group.py networking_sfc/tests/unit/db/__init__.py networking_sfc/tests/unit/db/test_flowclassifier_db.py networking_sfc/tests/unit/db/test_sfc_db.py networking_sfc/tests/unit/extensions/__init__.py networking_sfc/tests/unit/extensions/test_flowclassifier.py networking_sfc/tests/unit/extensions/test_servicegraph.py networking_sfc/tests/unit/extensions/test_sfc.py networking_sfc/tests/unit/extensions/test_tap.py networking_sfc/tests/unit/services/__init__.py networking_sfc/tests/unit/services/flowclassifier/__init__.py networking_sfc/tests/unit/services/flowclassifier/test_driver_manager.py networking_sfc/tests/unit/services/flowclassifier/test_plugin.py 
networking_sfc/tests/unit/services/flowclassifier/drivers/__init__.py networking_sfc/tests/unit/services/flowclassifier/drivers/ovs/__init__.py networking_sfc/tests/unit/services/flowclassifier/drivers/ovs/test_driver.py networking_sfc/tests/unit/services/sfc/__init__.py networking_sfc/tests/unit/services/sfc/test_driver_manager.py networking_sfc/tests/unit/services/sfc/test_plugin.py networking_sfc/tests/unit/services/sfc/agent/__init__.py networking_sfc/tests/unit/services/sfc/agent/extensions/__init__.py networking_sfc/tests/unit/services/sfc/agent/extensions/test_sfc.py networking_sfc/tests/unit/services/sfc/agent/extensions/openvswitch/__init__.py networking_sfc/tests/unit/services/sfc/agent/extensions/openvswitch/test_sfc_driver.py networking_sfc/tests/unit/services/sfc/common/__init__.py networking_sfc/tests/unit/services/sfc/common/test_ovs_ext_lib.py networking_sfc/tests/unit/services/sfc/drivers/__init__.py networking_sfc/tests/unit/services/sfc/drivers/ovs/__init__.py networking_sfc/tests/unit/services/sfc/drivers/ovs/test_driver.py playbooks/multinode-scenario-pre-run.yaml releasenotes/notes/.placeholder releasenotes/notes/drop-py27-support-4670c8cdcfa3ba78.yaml releasenotes/notes/mpls-correlation-c36070eba63b9f87.yaml releasenotes/notes/networking-sfc-0151b67501c641ef.yaml releasenotes/notes/service-graphs-4a1e54f6bbbfe805.yaml releasenotes/notes/sfc-tap-port-pair-db6b2f3d29520c9b.yaml releasenotes/notes/unique-correlation-in-ppg-96d803a244425f66.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder tools/check_unit_test_structure.sh tools/clean.sh zuul.d/jobs.yaml zuul.d/project.yaml 
zuul.d/projects.yamlnetworking-sfc-10.0.0/tools/0000775000175000017500000000000013656750461015727 5ustar zuulzuul00000000000000networking-sfc-10.0.0/tools/clean.sh0000775000175000017500000000030413656750333017343 0ustar zuulzuul00000000000000#!/usr/bin/env bash rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes rm -rf */*.deb rm -rf ./plugins/**/build/ ./plugins/**/dist rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-* networking-sfc-10.0.0/tools/check_unit_test_structure.sh0000775000175000017500000000313413656750333023560 0ustar zuulzuul00000000000000#!/usr/bin/env bash # This script identifies the unit test modules that do not correspond # directly with a module in the code tree. See TESTING.rst for the # intended structure. neutron_path=$(cd "$(dirname "$0")/.." && pwd) base_test_path=networking_sfc/tests/unit test_path=$neutron_path/$base_test_path test_files=$(find ${test_path} -iname 'test_*.py') ignore_regexes=( "^plugins.*$" "^db/test_migrations.py$" ) error_count=0 ignore_count=0 total_count=0 for test_file in ${test_files[@]}; do relative_path=${test_file#$test_path/} expected_path=$(dirname $neutron_path/networking_sfc/$relative_path) test_filename=$(basename "$test_file") expected_filename=${test_filename#test_} # Module filename (e.g. foo/bar.py -> foo/test_bar.py) filename=$expected_path/$expected_filename # Package dir (e.g. foo/ -> test_foo.py) package_dir=${filename%.py} if [ ! -f "$filename" ] && [ ! -d "$package_dir" ]; then for ignore_regex in ${ignore_regexes[@]}; do if [[ "$relative_path" =~ $ignore_regex ]]; then ((ignore_count++)) continue 2 fi done echo "Unexpected test file: $base_test_path/$relative_path" ((error_count++)) fi ((total_count++)) done if [ "$ignore_count" -ne 0 ]; then echo "$ignore_count unmatched test modules were ignored" fi if [ "$error_count" -eq 0 ]; then echo 'Success! All test modules match targets in the code tree.' exit 0 else echo "Failure! 
$error_count of $total_count test modules do not match targets in the code tree." exit 1 fi networking-sfc-10.0.0/README.rst0000664000175000017500000001043113656750333016253 0ustar zuulzuul00000000000000============================================================ Service Function Chaining Extension for OpenStack Networking ============================================================ Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/networking-sfc.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on Service Function Chaining API ----------------------------- This project provides APIs and implementations to support Service Function Chaining in Neutron. Service Function Chaining is a mechanism for overriding the basic destination based forwarding that is typical of IP networks. It is conceptually related to Policy Based Routing in physical networks but it is typically thought of as a Software Defined Networking technology. It is often used in conjunction with security functions although it may be used for a broader range of features. Fundamentally SFC is the ability to cause network packet flows to route through a network via a path other than the one that would be chosen by routing table lookups on the packet's destination IP address. It is most commonly used in conjunction with Network Function Virtualization when recreating in a virtual environment a series of network functions that would have traditionally been implemented as a collection of physical network devices connected in series by cables. A very simple example of a service chain would be one that forces all traffic from point A to point B to go through a firewall even though the firewall is not literally between point A and B from a routing table perspective. 
A more complex example is an ordered series of functions, each implemented in multiple VMs, such that traffic must flow through one VM at each hop in the chain but the network uses a hashing algorithm to distribute different flows across multiple VMs at each hop. This is an initial release, feedback is requested from users and the API may evolve based on that feedback. * Free software: Apache license * Source: https://opendev.org/openstack/networking-sfc * Documentation: https://docs.openstack.org/networking-sfc/latest * Overview: https://launchpad.net/networking-sfc * Bugs: https://bugs.launchpad.net/networking-sfc * Blueprints: https://blueprints.launchpad.net/networking-sfc * Wiki: https://wiki.openstack.org/wiki/Neutron/ServiceInsertionAndChaining * Release notes: https://docs.openstack.org/releasenotes/networking-sfc/ Features -------- * Creation of Service Function Chains consisting of an ordered sequence of Service Functions. SFs are virtual machines (or potentially physical devices) that perform a network function such as firewall, content cache, packet inspection, or any other function that requires processing of packets in a flow from point A to point B. 
* Reference implementation with Open vSwitch * Flow classification mechanism (ability to select and act on traffic) * Vendor neutral API * Modular plugin driver architecture Service Function Chaining Key Contributors ------------------------------------------ * Cathy Zhang (Project Lead): https://launchpad.net/~cathy-h-zhang * Louis Fourie: https://launchpad.net/~lfourie * Paul Carver: https://launchpad.net/~pcarver * Vikram: https://launchpad.net/~vikschw * Mohankumar: https://blueprints.launchpad.net/~mohankumar-n * Rao Fei: https://launchpad.net/~milo-frao * Xiaodong Wang: https://launchpad.net/~xiaodongwang991481 * Ramanjaneya Reddy Palleti: https://launchpad.net/~ramanjieee * Stephen Wong: https://launchpad.net/~s3wong * Igor Duarte Cardoso: https://launchpad.net/~igordcard * Prithiv: https://launchpad.net/~prithiv * Akihiro Motoki: https://launchpad.net/~amotoki * Swaminathan Vasudevan: https://launchpad.net/~swaminathan-vasudevan * Armando Migliaccio https://launchpad.net/~armando-migliaccio * Kyle Mestery https://launchpad.net/~mestery Background on the Subject of Service Function Chaining ------------------------------------------------------ * Original Neutron bug (request for enhancement): https://bugs.launchpad.net/neutron/+bug/1450617 * https://blueprints.launchpad.net/neutron/+spec/neutron-api-extension-for-service-chaining * https://blueprints.launchpad.net/neutron/+spec/common-service-chaining-driver-api * https://wiki.opnfv.org/display/VFG/Openstack+Based+VNF+Forwarding+Graph networking-sfc-10.0.0/setup.py0000664000175000017500000000127113656750333016300 0ustar zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) networking-sfc-10.0.0/.stestr.conf0000664000175000017500000000011413656750333017032 0ustar zuulzuul00000000000000[DEFAULT] test_path=${OS_TEST_PATH:-./networking_sfc/tests/unit} top_dir=./ networking-sfc-10.0.0/devstack/0000775000175000017500000000000013656750461016373 5ustar zuulzuul00000000000000networking-sfc-10.0.0/devstack/settings0000664000175000017500000000071213656750333020154 0ustar zuulzuul00000000000000# settings for networking-sfc devstack plugin NETWORKING_SFC_DIR=${NETWORKING_SFC_DIR:-"$DEST/networking-sfc"} NEUTRON_FLOWCLASSIFIER_PLUGIN=${NEUTRON_FLOWCLASSIFIER_PLUGIN:="networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin"} NEUTRON_SFC_PLUGIN=${NEUTRON_SFC_PLUGIN:-"networking_sfc.services.sfc.plugin.SfcPlugin"} NEUTRON_FLOWCLASSIFIER_DRIVERS=${NEUTRON_FLOWCLASSIFIER_DRIVERS:-"ovs"} NEUTRON_SFC_DRIVERS=${NEUTRON_SFC_DRIVERS:-"ovs"} networking-sfc-10.0.0/devstack/README.md0000664000175000017500000000203113656750333017644 0ustar zuulzuul00000000000000This directory contains the networking-sfc devstack plugin. To configure the networking sfc, in the [[local|localrc]] section, you will need to enable the networking-sfc devstack plugin by editing the [[local|localrc]] section of your local.conf file. 1) Enable the plugin To enable the plugin, add a line of the form: enable_plugin networking-sfc [GITREF] where is the URL of a networking-sfc repository [GITREF] is an optional git ref (branch/ref/tag). The default is master. 
For example If you have already cloned the networking-sfc repository (which is useful when testing unmerged changes) enable_plugin networking-sfc /opt/stack/networking-sfc Or, if you want to pull the networking-sfc repository from Github and use a particular branch (for example Liberty, here) enable_plugin networking-sfc https://opendev.org/openstack/networking-sfc master For more information, see the "Externally Hosted Plugins" section of https://docs.openstack.org/devstack/latest/plugins.html . networking-sfc-10.0.0/devstack/plugin.sh0000664000175000017500000000234413656750333020226 0ustar zuulzuul00000000000000# function definitions for networking-sfc devstack plugin function networking_sfc_install { setup_develop $NETWORKING_SFC_DIR } function _networking_sfc_install_server { neutron_service_plugin_class_add $NEUTRON_FLOWCLASSIFIER_PLUGIN neutron_service_plugin_class_add $NEUTRON_SFC_PLUGIN iniadd $NEUTRON_CONF sfc drivers $NEUTRON_SFC_DRIVERS iniadd $NEUTRON_CONF flowclassifier drivers $NEUTRON_FLOWCLASSIFIER_DRIVERS neutron-db-manage --subproject networking-sfc upgrade head } function _networking_sfc_install_agent { source $NEUTRON_DIR/devstack/lib/l2_agent plugin_agent_add_l2_agent_extension sfc configure_l2_agent } function networking_sfc_configure_common { if is_service_enabled q-svc neutron-api; then _networking_sfc_install_server fi if is_service_enabled q-agt neutron-agent && [[ "$Q_AGENT" == "openvswitch" ]]; then _networking_sfc_install_agent fi } if [[ "$1" == "stack" && "$2" == "install" ]]; then # Perform installation of service source echo_summary "Installing networking-sfc" networking_sfc_install elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring networking-sfc" networking_sfc_configure_common fi networking-sfc-10.0.0/LICENSE0000664000175000017500000002363713656750333015605 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, 
REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
networking-sfc-10.0.0/playbooks/0000775000175000017500000000000013656750461016572 5ustar zuulzuul00000000000000networking-sfc-10.0.0/playbooks/multinode-scenario-pre-run.yaml0000664000175000017500000000005513656750333024643 0ustar zuulzuul00000000000000- hosts: all roles: - multi-node-setup networking-sfc-10.0.0/.coveragerc0000664000175000017500000000015313656750333016705 0ustar zuulzuul00000000000000[run] branch = True source = networking_sfc # omit = networking_sfc/tests/* [report] ignore_errors = True