vmware-nsx-12.0.1/0000775000175100017510000000000013244524600013726 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/CONTRIBUTING.rst0000666000175100017510000000106513244523345016400 0ustar zuulzuul00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/vmware-nsx vmware-nsx-12.0.1/README.rst0000666000175100017510000000142113244523345015422 0ustar zuulzuul00000000000000=================== VMware-NSX package =================== You have come across the VMware-NSX family of Neutron plugins External Resources: ------------------- The homepage for the VMware-NSX project is on Launchpad_. .. _Launchpad: https://launchpad.net/vmware-nsx Use this site for asking for help, and filing bugs. Code is available both git.openstack.org_ and github_. .. _git.openstack.org: https://git.openstack.org/cgit/openstack/vmware-nsx/tree/ .. _github: https://github.com/openstack/vmware-nsx For help on usage and hacking of VMware-NSX, please send a message to the openstack-dev_ mailing list. .. _openstack-dev: mailto:openstack-dev@lists.openstack.org For information on how to contribute to VMware-NSX, please see the contents of the CONTRIBUTING.rst file. 
vmware-nsx-12.0.1/releasenotes/0000775000175100017510000000000013244524600016417 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/releasenotes/source/0000775000175100017510000000000013244524600017717 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/releasenotes/source/_templates/0000775000175100017510000000000013244524600022054 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/releasenotes/source/_templates/.placeholder0000666000175100017510000000000013244523345024334 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/releasenotes/source/unreleased.rst0000666000175100017510000000015613244523345022611 0ustar zuulzuul00000000000000============================= Current Series Release Notes ============================= .. release-notes:: vmware-nsx-12.0.1/releasenotes/source/pike.rst0000666000175100017510000000022613244523345021410 0ustar zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: origin/stable/pike vmware-nsx-12.0.1/releasenotes/source/conf.py0000666000175100017510000002145113244523345021230 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # VMware NSX Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. 
# # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'VMware NSX Release Notes' copyright = u'2015, VMware, Inc.' # Release notes do not need a version number in the title, they # cover multiple releases. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. 
cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. 
These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'VMwareNsxReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ ('index', 'VMwareNsxReleaseNotes.tex', u'VMware NSX Release Notes Documentation', u'VMware NSX Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'vmwarensxreleasenotes', u'VMware NSX Release Notes Documentation', [u'VMware NSX Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'VMwareNsxReleaseNotes', u'VMware NSX Release Notes Documentation', u'VMware NSX Developers', 'VMwareNsxReleaseNotes', 'VMware NSX plugins code for OpenStack Neutron.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
# texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] vmware-nsx-12.0.1/releasenotes/source/liberty.rst0000666000175100017510000000022213244523345022126 0ustar zuulzuul00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty vmware-nsx-12.0.1/releasenotes/source/ocata.rst0000666000175100017510000000021213244523345021542 0ustar zuulzuul00000000000000============================ Ocata Series Release Notes ============================ .. release-notes:: :branch: origin/stable/ocata vmware-nsx-12.0.1/releasenotes/source/_static/0000775000175100017510000000000013244524600021345 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/releasenotes/source/_static/.placeholder0000666000175100017510000000000013244523345023625 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/releasenotes/source/index.rst0000666000175100017510000000024313244523413021562 0ustar zuulzuul00000000000000========================== VMware NSX Release Notes ========================== .. toctree:: :maxdepth: 1 unreleased pike ocata newton liberty vmware-nsx-12.0.1/releasenotes/source/newton.rst0000666000175100017510000000021413244523345021767 0ustar zuulzuul00000000000000============================ Newton Series Release Notes ============================ .. release-notes:: :branch: origin/stable/newton vmware-nsx-12.0.1/releasenotes/notes/0000775000175100017510000000000013244524600017547 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/releasenotes/notes/nsx-extension-drivers-b1aedabe5296d4d0.yaml0000666000175100017510000000052113244523345027362 0ustar zuulzuul00000000000000--- prelude: > We have added a new configuration variable that will enable us to enable existing extensions. The new configuration variable is ``nsx_extension_drivers``. This is in the default section. This is a list of extansion names. 
The code for the drivers must be in the directory vmware_nsx.extension_drivers. vmware-nsx-12.0.1/releasenotes/notes/nsxv-lbaas-l7-704f748300d1a399.yaml0000666000175100017510000000022313244523345025034 0ustar zuulzuul00000000000000--- prelude: > The NSX-V lbaas plugin now supports L7 rules & policies. features: - The NSX-V lbaas plugin now supports L7 rules & policies. vmware-nsx-12.0.1/releasenotes/notes/nsxv3-vpnaas-0b02762ff4b83904.yaml0000666000175100017510000000022613244523345025063 0ustar zuulzuul00000000000000--- prelude: > Support VPN-as-a-Service for VPN IPSEC in NSXv3 plugin. features: - | NSXv3 plugin now supports VPN SEC through VPNaaS plugin. vmware-nsx-12.0.1/releasenotes/notes/nsxv3-update-provider-types-aa1c20e988878ffe.yaml0000666000175100017510000000041713244523345030312 0ustar zuulzuul00000000000000--- prelude: > Adding support for Geneve and nSX-network provider networks. features: - | Deprecating the VXLAN provider network type. Adding Geneve provider networks (with overlay transport zone). Adding nsx-net provider networks attached to an existing nsx vmware-nsx-12.0.1/releasenotes/notes/nsxv3-init-from-tags-bcd4f3245a78e9a6.yaml0000666000175100017510000000062513244523345026667 0ustar zuulzuul00000000000000--- prelude: > NSX-V3 plugin supports a new configuration option for the transport zones, tier-0 router, dhcp profile and md-proxy in the nsx ini file using NSX Tags insead of names or IDs. features: - | NSX-V3 plugin supports a new configuration option for the transport zones, tier-0 router, dhcp profile and md-proxy in the nsx ini file using NSX Tags insead of names or IDs. vmware-nsx-12.0.1/releasenotes/notes/nsxv-ipam-support-6eb1ac4e0e025ddd.yaml0000666000175100017510000000056113244523345026520 0ustar zuulzuul00000000000000--- prelude: > The NSX-v plugin can use the platform IPAM for ip allocations for external networks and provider networks. 
features: - The NSX-v plugin can use the platform IPAM for ip allocations for external networks and provider networks. In order to use this feature, the ipam_driver in the neutron.conf file should be set to vmware_nsxv_ipam. vmware-nsx-12.0.1/releasenotes/notes/nsxv3-availability-zones-8decf892df62.yaml0000666000175100017510000000065513244523345027251 0ustar zuulzuul00000000000000--- prelude: > The NSX-v3 plugin supports availability zones hints on networks creation in order to separate the native dhcp configuration. features: - The NSX-v3 plugin supports availability zones hints on networks creation in order to separate the native dhcp configuration. The availability zones configuration includes the metadata_proxy, dhcp_profile, native_metadata_route and dns related parameters. vmware-nsx-12.0.1/releasenotes/notes/nsxv-fwaas-driver-4c457dee3fc3bae2.yaml0000666000175100017510000000030713244523345026463 0ustar zuulzuul00000000000000--- prelude: > The NSX-V plugin can suppport FWaaS-V1 for setting router edges firewall rules. features: - | The NSX-V plugin can suppport FWaaS-V1 for setting router edges firewall rules. vmware-nsx-12.0.1/releasenotes/notes/nsxv-router-flavors-8e4cea7f6e12d44d.yaml0000666000175100017510000000046713244523345027021 0ustar zuulzuul00000000000000--- prelude: > The NSX-v plugin supports using router flavors in routers creation. features: - The NSX-v plugin supports using router flavors in routers creation. A router flavor can include the router type, size, distributed flag and availability zones in order to easily create similar routers. vmware-nsx-12.0.1/releasenotes/notes/nsxv3-ipam-support-137174152c65459d.yaml0000666000175100017510000000070013244523345026076 0ustar zuulzuul00000000000000--- prelude: > The NSX-v3 plugin can use the platform IPAM for ip allocations for all network types. features: - The NSX-v3 plugin can use the platform IPAM for ip allocations for all network types. 
In order to use this feature, the ipam_driver in the neutron.conf file should be set to vmware_nsxv3_ipam. Currently the plugin does not support allocating a specific address from the pool depending on the NSX version. vmware-nsx-12.0.1/releasenotes/notes/nsxv-subnets-dhcp-mtu-c7028748b516422e.yaml0000666000175100017510000000043413244523345026640 0ustar zuulzuul00000000000000--- prelude: > The new extension dhcp-mtu of subnets in the NSX-v plugin can be used to configure the DHCP client network interface MTU features: - The new extension dhcp-mtu of subnets in the NSX-v plugin can be used to configure the DHCP client network interface MTU. vmware-nsx-12.0.1/releasenotes/notes/rename_uuid_config_params-b36c379f64838334.yaml0000666000175100017510000000125113244523345027641 0ustar zuulzuul00000000000000--- prelude: > The 'default_tier0_router_uuid', 'default_overlay_tz_uuid', 'default_vlan_tz_uuid', and 'default_bridge_cluster_uuid' options have been deprecated and replaced by 'default_tier0_router', 'default_overlay_tz', 'default_vlan_tz', and 'default_bridge_cluster' respectively, which can accept both name or uuid deprecations: - The 'default_tier0_router_uuid', 'default_overlay_tz_uuid', 'default_vlan_tz_uuid', and 'default_bridge_cluster_uuid' options have been deprecated and replaced by 'default_tier0_router', 'default_overlay_tz', 'default_vlan_tz', and 'default_bridge_cluster' respectively, which can accept both name or uuid vmware-nsx-12.0.1/releasenotes/notes/nsxv3-multi-managers-b645c4202a8476e9.yaml0000666000175100017510000000042413244523345026527 0ustar zuulzuul00000000000000--- prelude: > The NSX-v3 plugin supports different credentials for the NSX managers. features: The nsxv3 configuration parameters ca_file, nsx_api_user & nsx_api_password are now lists, in order to support different credentials for each of the NSX managers. 
vmware-nsx-12.0.1/releasenotes/notes/nsx-dns-integration-extension-8260456051d61743.yaml0000666000175100017510000000043013244523345030217 0ustar zuulzuul00000000000000--- prelude: > The dns-integration extension is now supported in both NSXV and NSXV3 plugins. It can be enabled by adding 'vmware_nsxv_dns' (for NSXV) or 'vmware_nsxv3_dns' (for NSXV3) to the ``nsx_extension_drivers`` configuration variable in neutron.conf file. vmware-nsx-12.0.1/releasenotes/notes/bind-floating-ips-per-az-142f0de7ebfae1c8.yaml0000666000175100017510000000061013244523345027572 0ustar zuulzuul00000000000000--- prelude: > Enable 'bind_floatingip_to_all_interfaces' to be configured per availability zone. features: - | Enable 'bind_floatingip_to_all_interfaces' to be configured per availability zone. This will enable an admin to ensure that an AZ can have flotaing IP's configured on all edge vNICS. This enables VM's on the same subnet to communicate via floating IP's. vmware-nsx-12.0.1/releasenotes/notes/nsxv3-dhcp-relay-32cf1ae281e1.yaml0000666000175100017510000000066213244523345025360 0ustar zuulzuul00000000000000--- prelude: > The NSX-v3 plugin supports DHCP relay service per network availability zones. features: - The NSX-v3 plugin supports DHCP relay service per network availability zones. When a router interface port is created, the relay service will be added to it. DHCP traffic on the subnet will go through the DHCP server configured in the dhcp relay service on the NSX, if it is connected to the router.vmware-nsx-12.0.1/releasenotes/notes/nsxv3-trnasparent-vlan-fe06e1d3aa2fbcd9.yaml0000666000175100017510000000024213244523345027525 0ustar zuulzuul00000000000000--- prelude: > The NSX-V3 plugin supports transparent vlan networks. features: - | The NSX-V3 plugin supports transparent vlan networks for guest vlan. 
vmware-nsx-12.0.1/releasenotes/notes/nsxv3-taas-driver-1a316cf3915fcb3d.yaml0000666000175100017510000000041713244523345026232 0ustar zuulzuul00000000000000--- prelude: > Support Tap-as-a-Service for port mirroring in NSXv3 plugin. features: - NSXv3 plugin now supports port mirroring via TaaS APIs which integrates into the backend L3SPAN APIs i.e. the mirrored packets are sent to the destination port over L3. vmware-nsx-12.0.1/releasenotes/notes/rename_uuid_to_name-e64699df75176d4d.yaml0000666000175100017510000000070313244523345026625 0ustar zuulzuul00000000000000--- prelude: > - In NSX|v3 plugin, the 'dhcp_profile_uuid' and 'metadata_proxy_uuid' options have been deprecated and replaced by 'dhcp_profile' and 'metadata_proxy' respectively, which can accept both name or uuid. deprecations: - In NSX|v3 plugin, the 'dhcp_profile_uuid' and 'metadata_proxy_uuid' options have been deprecated and replaced by 'dhcp_profile' and 'metadata_proxy' respectively, which can accept both name or uuid. vmware-nsx-12.0.1/releasenotes/notes/nsxv-edge-random-placement-9534371967edec8f.yaml0000666000175100017510000000065313244523345027754 0ustar zuulzuul00000000000000--- prelude: > Support randomly selecting which will be the primary datastore and which will be the secondary one when deplying an edge, in order to balance the load. This new option is available globally as well as per availability_zone. features: - | Support randomly selecting which will be the primary datastore and which will be the secondary one when deplying an edge, in order to balance the load. vmware-nsx-12.0.1/releasenotes/notes/qos-support-d52b5e3abfc6c8d4.yaml0000666000175100017510000000053413244523345025414 0ustar zuulzuul00000000000000--- prelude: > Support for QoS bandwidth limit and DSCP marking. features: - The plugin can apply a QoS rule to networks and ports that mark outgoing traffic's type of service packet header field. 
- The plugin can apply a QoS rule to networks and ports that limits the outgoing traffic with the defined average and peak bandwidth. vmware-nsx-12.0.1/releasenotes/notes/nsxv3-add-trunk-driver-925ad1205972cbdf.yaml0000666000175100017510000000037313244523345027121 0ustar zuulzuul00000000000000--- prelude: > Support VLAN-aware-VM feature in NSXv3 plugin. features: - Trunk driver for NSXv3 plugin which allows creation of trunk ports and subports which subsequently create parent port and child ports relationship in the backend. vmware-nsx-12.0.1/releasenotes/notes/dvs_dns_integration-831224f15acbc728.yaml0000666000175100017510000000034613244523345026637 0ustar zuulzuul00000000000000--- features: - | One can enable DNS integration for the upstream neutron for VMware NSX-DVS. DNS integration extension by setting: nsx_extension_drivers = vmware_dvs_dns in the default section of neutron.conf. vmware-nsx-12.0.1/releasenotes/notes/nsxv-bgp-support-44f857d382943e08.yaml0000666000175100017510000000022213244523345025731 0ustar zuulzuul00000000000000--- prelude: > The NSX-V plugin suppports BGP for dynamic routing. features: - | The NSX-V plugin can suppport BGP for dynamic routing. vmware-nsx-12.0.1/releasenotes/notes/fwaas_v2-9445ea0aaea91c60.yaml0000666000175100017510000000027113244523345024436 0ustar zuulzuul00000000000000--- prelude: > The NSX-v3 plugin supports FWaaS V2. features: The NSX-v3 plugin now supports FWaaS V2 allowing to set a different firewall group policy on each router port. vmware-nsx-12.0.1/releasenotes/notes/nsxv-service-insertion-32ab34a0e0f6ab4f.yaml0000666000175100017510000000073713244523345027450 0ustar zuulzuul00000000000000--- prelude: > The NSX-V plugin supports service insertion by redirecting traffic matched to the neutron flow classifiers, to the NSX-V partner security services. features: - The NSX-V plugin supports service insertion by redirecting traffic matched to the neutron flow classifiers, to the NSX-V partner security services. 
For each flow-classifier defined in neutron, a new traffic redirection rule will be created in the NSX partner security services tab. vmware-nsx-12.0.1/releasenotes/notes/nsxv3-native-dhcp-metadata-27af1de98302162f.yaml0000666000175100017510000000061213244523345027634 0ustar zuulzuul00000000000000--- prelude: > The NSX-V3 plugin supports native DHCP and metadata services provided by NSX backend. features: - The NSX-V3 plugin version 1.1.0 allows users to use native DHCP and metadata services provided by designated edge cluster in NSX backend version 1.1.0. The edge cluster can provides high availability if more than one edge nodes are configured in the cluster. vmware-nsx-12.0.1/releasenotes/notes/block-all-no-security-groups-47af550349dbc85a.yaml0000666000175100017510000000062313244523345030317 0ustar zuulzuul00000000000000--- prelude: > Enable 'use_default_block_all' to ensure that traffic to a port that has no security groups and has port security enabled will be discarded. features: - | Enable 'use_default_block_all' to ensure that traffic to a port that has no security groups and has port security enabled will be discarded. This will ensure the same behaviours as the upstream security groups. vmware-nsx-12.0.1/releasenotes/notes/nsxv-vlan-selection-ec73aac44b3648a1.yaml0000666000175100017510000000025713244523345026652 0ustar zuulzuul00000000000000--- prelude: > The NSX-V plugin can decide on the VLAN tag for a provider network. features: - | The NSX-V plugin can decide on the VLAN tag for a provider network. vmware-nsx-12.0.1/releasenotes/notes/nsxv-exclusive-dhcp-7e5cde1cd88f8c5b.yaml0000666000175100017510000000041613244523345027034 0ustar zuulzuul00000000000000--- prelude: > Add support for exclusive DHCP edges. features: - | The NSX-v will now enable a tenant to deploy a exclusive DHCP edge. This is either via the global configuration variable ``exclusive_dhcp_edge`` or per AZ. By default this is disabled. 
vmware-nsx-12.0.1/releasenotes/notes/nsxv-policy-3f552191f94873cd.yaml0000666000175100017510000000061613244523345025031 0ustar zuulzuul00000000000000--- prelude: > The NSX-V plugin allows admin user to create security groups consuming NSX policies, both as regular / default and provider security gruops. features: - The NSX-V plugin supports the concumption of NSX policies through security groups. Depending on the configuration, an admin user can create security groups without rules, that will be connected to an NSX policy. vmware-nsx-12.0.1/releasenotes/notes/ens_support-49dbc626ba1b16be.yaml0000666000175100017510000000040613244523345025370 0ustar zuulzuul00000000000000--- prelude: > Add a configuration variable indicating that ENS transport zones can be used. features: - | Add a new configuration variable ``ens_support`` to the ``nsx_v3`` section. This indicates if a tenant or admin can create ENS networks. vmware-nsx-12.0.1/releasenotes/notes/dns-search-domain-configuration-a134af0ef028282c.yaml0000666000175100017510000000045613244523345031011 0ustar zuulzuul00000000000000--- prelude: > Enable an admin to configure a global search domain. This is used if no search domain is configured on a subnet. features: - A new configuration variable in the nsxv section will enable the admin to configure a search domain. The new variable is dns_search_domain. vmware-nsx-12.0.1/releasenotes/notes/provider-security-group-2cfc1231dcaf21ac.yaml0000666000175100017510000000115613244523345027705 0ustar zuulzuul00000000000000--- prelude: > Tenant specific blocking firewall rules to be managed via Neutron security-group API features: - Admin user can now create a security-group with the 'provider' flag to indicate whether rules take implicit 'deny' action. 
- Provider security-group rules takes precedence over normal security-group rules - Each tenant may have at most one security-group marked as provider - New tenant ports are associated with the provider security-group automatically, unless explicitly asked otherwise - Supported by NSX V3 - Supported by NSX VSphere, version 6.2 or newervmware-nsx-12.0.1/releasenotes/notes/nsxv3-lbaasv2-driver-57f37d6614eb1510.yaml0000666000175100017510000000037213244523345026422 0ustar zuulzuul00000000000000--- prelude: > NSXv3 plugin supports LBaaS v2 using NSX native load balancing. features: - | Add NSXv3 neutron lbaas v2 driver to support LBaaS v2.0. This includes both layer4 and layer7 load balancing via NSX native load balancer. vmware-nsx-12.0.1/releasenotes/notes/nsxv3-switching-profiles-250aa43f5070dc37.yaml0000666000175100017510000000047713244523345027471 0ustar zuulzuul00000000000000--- prelude: > The nsx-v3 plugin can add pre-configured switching profiles to new nsx ports. The configuration can also be done per availability zone. features: - | The nsx-v3 plugin can add pre-configured switching profiles to new nsx ports. The configuration can also be done per availability zone. vmware-nsx-12.0.1/releasenotes/notes/nsxv3-native-dhcp-config-2b6bdd372a2d643f.yaml0000666000175100017510000000056013244523345027456 0ustar zuulzuul00000000000000--- prelude: > Starting Newton release we added support for native DHCP and metadata provided by NSXv3 backend. features: - Since now most of the NSXv3 deployment are using native DHCP/Metadata, default this option native_dhcp_metadata to True. By default, it will use NSXv3 native DHCP and Metadata unless this has been explicitly set to False. 
vmware-nsx-12.0.1/releasenotes/notes/.placeholder0000666000175100017510000000000013244523345022027 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/releasenotes/notes/nsxv-availability-zones-85db159a647762b3.yaml0000666000175100017510000000072313244523345027333 0ustar zuulzuul00000000000000--- prelude: > The NSX-v plugin supports availability zones hints on routers and networks creation in order to create them on the requested nsx datastore and resource pool. features: - The NSX-v plugin supports availability zones hints on routers and networks creation in order to create them on the requested nsx datastore and resource pool. The availability zones configuration includes the resource pool, datastore, and HA datastore. vmware-nsx-12.0.1/releasenotes/notes/universal-switch-41487c280ad3c8ad.yaml0000666000175100017510000000057013244523345026165 0ustar zuulzuul00000000000000--- prelude: > The NSX-v plugin supports universal switches. features: The NSX-v universal transport zone can be used in order to create universal switches as VXLAN networks over all the nsx managers. For this option to be enabled, the vdn_scope_id parameter in nsx.ini should be set to the ID of the universal transport zone which is 'universalvdnscope'. 
vmware-nsx-12.0.1/babel.cfg0000666000175100017510000000002013244523345015453 0ustar zuulzuul00000000000000[python: **.py] vmware-nsx-12.0.1/setup.cfg0000666000175100017510000001200413244524600015546 0ustar zuulzuul00000000000000[metadata] name = vmware-nsx summary = VMware NSX library for OpenStack projects description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = https://launchpad.net/vmware-nsx classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 [files] packages = vmware_nsx [entry_points] console_scripts = neutron-check-nsx-config = vmware_nsx.check_nsx_config:main nsxadmin = vmware_nsx.shell.nsxadmin:main nsx-migration = vmware_nsx.api_replay.cli:main neutron.db.alembic_migrations = vmware-nsx = vmware_nsx.db.migration:alembic_migrations neutron.core_plugins = vmware_nsx = vmware_nsx.plugin:NsxPlugin vmware_nsxv = vmware_nsx.plugin:NsxVPlugin vmware_nsxv3 = vmware_nsx.plugin:NsxV3Plugin vmware_dvs = vmware_nsx.plugin:NsxDvsPlugin vmware_nsxtvd = vmware_nsx.plugin:NsxTVDPlugin firewall_drivers = vmware_nsxv_edge = vmware_nsx.services.fwaas.nsx_v.edge_fwaas_driver:EdgeFwaasDriver vmware_nsxv3_edge = vmware_nsx.services.fwaas.nsx_v3.edge_fwaas_driver_v1:EdgeFwaasV3DriverV1 vmware_nsxv3_edge_v1 = vmware_nsx.services.fwaas.nsx_v3.edge_fwaas_driver_v1:EdgeFwaasV3DriverV1 vmware_nsxv3_edge_v2 = vmware_nsx.services.fwaas.nsx_v3.edge_fwaas_driver_v2:EdgeFwaasV3DriverV2 vmware_nsxtvd_edge_v1 = vmware_nsx.services.fwaas.nsx_tv.edge_fwaas_driver_v1:EdgeFwaasTVDriverV1 vmware_nsxtvd_edge_v2 = vmware_nsx.services.fwaas.nsx_tv.edge_fwaas_driver_v2:EdgeFwaasTVDriverV2 neutron.service_plugins = 
vmware_nsxv_qos = vmware_nsx.services.qos.nsx_v.plugin:NsxVQosPlugin vmware_nsxtvd_lbaasv2 = vmware_nsx.services.lbaas.nsx.plugin:LoadBalancerTVPluginV2 vmware_nsxtvd_fwaasv1 = vmware_nsx.services.fwaas.nsx_tv.plugin_v1:FwaasTVPluginV1 vmware_nsxtvd_fwaasv2 = vmware_nsx.services.fwaas.nsx_tv.plugin_v2:FwaasTVPluginV2 vmware_nsxtvd_l2gw = vmware_nsx.services.l2gateway.nsx_tvd.plugin:L2GatewayPlugin vmware_nsxtvd_qos = vmware_nsx.services.qos.nsx_tvd.plugin:QoSPlugin vmware_nsxtvd_vpnaas = vmware_nsx.services.vpnaas.nsx_tvd.plugin:VPNPlugin neutron.qos.notification_drivers = vmware_nsxv3_message_queue = vmware_nsx.services.qos.nsx_v3.message_queue:NsxV3QosNotificationDriver neutron.ipam_drivers = vmware_nsxv_ipam = vmware_nsx.services.ipam.nsx_v.driver:NsxvIpamDriver vmware_nsxv3_ipam = vmware_nsx.services.ipam.nsx_v3.driver:Nsxv3IpamDriver vmware_nsxtvd_ipam = vmware_nsx.services.ipam.nsx_tvd.driver:NsxTvdIpamDriver vmware_nsx.extension_drivers = vmware_nsxv_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverNSXv vmware_nsxv3_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverNSXv3 vmware_dvs_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverDVS vmware_nsx.neutron.nsxv.router_type_drivers = shared = vmware_nsx.plugins.nsx_v.drivers.shared_router_driver:RouterSharedDriver distributed = vmware_nsx.plugins.nsx_v.drivers.distributed_router_driver:RouterDistributedDriver exclusive = vmware_nsx.plugins.nsx_v.drivers.exclusive_router_driver:RouterExclusiveDriver oslo.config.opts = nsx = vmware_nsx.opts:list_opts networking_sfc.flowclassifier.drivers = vmware-nsxv-sfc = vmware_nsx.services.flowclassifier.nsx_v.driver:NsxvFlowClassifierDriver openstack.cli.extension = nsxclient = vmware_nsx.osc.plugin openstack.nsxclient.v2 = port_create = vmware_nsx.osc.v2.port:NsxCreatePort port_set = vmware_nsx.osc.v2.port:NsxSetPort router_create = vmware_nsx.osc.v2.router:NsxCreateRouter router_set = 
vmware_nsx.osc.v2.router:NsxSetRouter security_group_create = vmware_nsx.osc.v2.security_group:NsxCreateSecurityGroup security_group_set = vmware_nsx.osc.v2.security_group:NsxSetSecurityGroup subnet_create = vmware_nsx.osc.v2.subnet:NsxCreateSubnet subnet_set = vmware_nsx.osc.v2.subnet:NsxSetSubnet project_plugin_create = vmware_nsx.osc.v2.project_plugin_map:CreateProjectPluginMap project_plugin_show = vmware_nsx.osc.v2.project_plugin_map:ShowProjectPluginMap project_plugin_list = vmware_nsx.osc.v2.project_plugin_map:ListProjectPluginMap vmware_nsx.neutron.nsxv.housekeeper.jobs = error_dhcp_edge = vmware_nsx.plugins.nsx_v.housekeeper.error_dhcp_edge:ErrorDhcpEdgeJob error_backup_edge = vmware_nsx.plugins.nsx_v.housekeeper.error_backup_edge:ErrorBackupEdgeJob [build_sphinx] source-dir = doc/source build-dir = doc/build all_files = 1 [upload_sphinx] upload-dir = doc/build/html [compile_catalog] directory = vmware_nsx/locale domain = vmware_nsx [update_catalog] domain = vmware_nsx output_dir = vmware_nsx/locale input_file = vmware_nsx/locale/vmware_nsx.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = vmware_nsx/locale/vmware_nsx.pot [pbr] autodoc_index_modules = 1 [wheel] universal = 1 [egg_info] tag_build = tag_date = 0 vmware-nsx-12.0.1/PKG-INFO0000664000175100017510000000340613244524600015026 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: vmware-nsx Version: 12.0.1 Summary: VMware NSX library for OpenStack projects Home-page: https://launchpad.net/vmware-nsx Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: =================== VMware-NSX package =================== You have come across the VMware-NSX family of Neutron plugins External Resources: ------------------- The homepage for the VMware-NSX project is on Launchpad_. .. 
_Launchpad: https://launchpad.net/vmware-nsx Use this site for asking for help, and filing bugs. Code is available both git.openstack.org_ and github_. .. _git.openstack.org: https://git.openstack.org/cgit/openstack/vmware-nsx/tree/ .. _github: https://github.com/openstack/vmware-nsx For help on usage and hacking of VMware-NSX, please send a message to the openstack-dev_ mailing list. .. _openstack-dev: mailto:openstack-dev@lists.openstack.org For information on how to contribute to VMware-NSX, please see the contents of the CONTRIBUTING.rst file. Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 vmware-nsx-12.0.1/ChangeLog0000664000175100017510000122016713244524574015523 0ustar zuulzuul00000000000000CHANGES ======= 12.0.1 ------ * TVD IPAM support * NSX-V Admin Utils: List BGP GW edges * NSX-V3 add ens\_support arg to devstack * TVD: Add service plugins to separate list results * NSX-V3: do not add the DHCP profile for ENS networks * NSX|V: ensure that no sec groups and port sec will discard traffic * NSX|TVD: add ability to add extra filters * NSX|V: treat edge case when spoofguard entry already exists * NSX-v3 VPNaaS: Use a local address from the external network * TVD: Make sure lbaas subnet belongs to the correct plugin * AdminUtils: Skip housekeeping on admin utils calls * TVD: fix get\_<>s but plugin with filters * admin utility enabled nsx-update for security groups (V and T) * TVD Fwaas: prevent adding wrong plugin routers to FW * NSX|V3: ensure that 0 is in the guest tag range * Update 
UPPER\_CONSTRAINTS\_FILE for stable/queens * Update .gitreview for stable/queens * TVD: Make sure subnets project is the same as the network * Add logging to help detect port security conflicts * NSX|V3: validate external subnet has no DHCP enabled * NSX-V3: Update vlan-transparent case for network * NSX-V: make sure error fw can be deleted * NSXv DNS integration * NSX|V: default support for IGMP traffic * NSX|V3: only allow physical network to be confogured for external net * NSX|V3: allow a router to be attached to a VLAN network * NSX|V: spoofguard\_enabled disabled enhancement * NSXv3 DNS integration * TVD: do not support policy extension for nsx-t plugin * TVD: fix get\_<>s but plugin with filters * TVD: Fix filtering without project id * TVD: no longer experimental * Use the new PTI for document build * NSX|V3: enable VLAN transparent to be configured with VLAN * TVD: Fix FWaaS typo * AdminUtils NSX-v: Fix SG migration to policy * TVD: ensure that extra attributs are set for router iif they exist * AdminUtils: Skip unsupported resources * NSX|V3: enahance admin utility for metadata proxy list * AdminUtil NSX-v3: recreate dhcp server for a network * NSX|TV: validate plugin is available * NSX-v3: VPNaaS supports only No-SNAT routers * Updated from global requirements * NSXv3: allow use api\_workers=-1 * NSX\_V3: add flag to indicate if ENS networks can be created * use vlantransparent api def from neutron-lib * TVD: FWaaS plugins * NSX|V: ensure that only LAG is configured and not standby * TVD: make security group logging more robust * TVD: ensure get\_ports works for DVS plugin * NSX-V3 devstack: cleanup VPNaaS objects * use multiprovidernet api definition from neutron-lib * TVD: ensure that can return specific tenant/project requests * Updated from global requirements * NSX-V3 FWaaSV2 prevent adding compute ports to FW group * TVD AdminUtils: Use only objects from specific plugin * NSX-v3: fix update\_router\_firewall error handling * TVD|AdminUtils: Add all 
nsxv/v3 utils to tvd * Updated from global requirements * AdminUtils: NSX-V3: Show and update the NSX rate limit * NSX-v| Do not allow setting qos policy on port * Fix VPN api as the NSX api changed * NSX|V: enable binding floating ip's per AZ * TVD availability zones * TVD: support lbaasv2 'provider' filtering * NSX\_V3: enable non native DHCP to work with AZ support * NSX-v3: Inform FWaaS when a router interface is removed * TVD: move plugins init\_complete code to the end of init * NSXv, DVS: Use neutron DB name instead of neutron\_nsx * use api attributes from neutron-lib * TV: doc creation of admin for a specific plugin * TVD: improve default plugin failure at boot time * NSX\_V3: make sure that member creation is serialized * NSX-v3: Use logical switch id in FWaaS V2 rules * Remove leftover debug logs * TVD: Add VPNaaS wrapper driver * TVD project plugin mappings validations * NSX|V: ensure port security is enabled for address pair support * NSX\_V3: ensure that the correct router attributes are read * BUG\_FIX: policy.d copy over for devstack * TVD: use warning if plugin is not supported * TVD: improve get subnets * TVD: get\_address\_scopes and get\_subnet\_pools support * use service aliases from plugin constants * NSX|V3: VPNaaS support * TVD: create subnet bulk support * use callback payloads for \_SPAWN events * NSX|v+v3: Prevent adding 0.0.0.0 route to router * AdminUtils NSX-V3 fix for FWaaS callbacks * TVD: fix logging configuration at boot * Housekeeper: Per-job readonly option * NSX|TVD: ensure that port update is under transaction * NSX|V3: fix load balancer admin util list * TVD: Add NSX-v CI exclusions * NSX-V+V3: Fix network availability zones extend func * Housekeeper: trigger execution * NSXv HK: recover broken backup edge appliances * TVD: fix plugin apis to allow CI job to succeed * DVS: fix get\_por and get\_portst * TVD: Fix devstack cleanup * AdminUtils NSX-v3: Add config import * NSX-v: Fix VPNaaS driver * NSX|v+v3: Prevent adding 
default route to router * TVD: Update devstack doc with different services * TVD servives: Handle the case where plugin is disabled * TVD+BGP: adapt the nsx bgp plugin to be used in TVD * TVD: l2gw support * NSX-V3 Fix router availability zones * TVD: Add default plugin configuration * Updated from global requirements * NSXv: update static routes in LBaaS loadbalancer * NSXv: Allow exclusive router deletion with LBaaS * Plugin housekeeper * NSX-TVD basic unittests * NSXv3: Continue HM delete in case of inconsistence * TVD: Support DVS plugin calls * TVD: add in DVS extenion\_driver support * TVD: ensure no empty project is added to the DB * NSX-TVD QoS drivers support * NSX-TVD: fix extensions list * NSX-TVD: Fix md proxy internal tenant * NSX-TVD migration admin util * NSX-TVD: Add some logs at init and mappings * NSX-TV fwaas drivers * Updated from global requirements * TVD: ensure that get ports does not throw exception * NSX TVD: V, T and simple DVS Coexist in the same plugin * Updated from global requirements * NSX|V3: Move logic from fwaas driver to the v3 plugin * NSX|V3 complete init of fwaas core plugin * NSX|V: Fix Fwaas for distributed router * NSXv: Handle LBaaSv2 listener inconsistency * NSX|V3: improve ENS exception limitations * NSX|V3: transparent support for logical switches * NSX|V prevent deleting md proxy neutron objects * TVD: add support for get\_\*s * NSX|V3: prevent DHCP port deletion with native support * TVD: support housekeeper for TVD * NSX|V3: ensure provider securiry updates are done * NSX|V3: upgrade NSX version in the unit tests to 2.2 * Create common devstack files for V and T * NSX|V3: ensure that metadata works with windows instances * Fix DVS devstack configuration * Integrate with floating ips OVO * NSX|V: Add configuration for shared rotuer size * NSXv3: Handle association fip to vip in different cases * NSX|V: Improve no-dhcp subnet unit tests * TVD: enable DVS to be configured * NSXv3: Fix a typo to return router\_id * NSX|v: 
Add qos policy id to port * Reset static dhcp binding on mac address change * NSXv3: Validate LB router gateway * NSXv3: Update FIP on VIP gracefully * Updated from global requirements * NSXv3: Update VIP on multiple listeners * NSXv3: Fix pool member update * NSX|V: validate DVS in network-vlan-ranges * NSX|V: support name update for VLAN and FLAT networks * NSXv3: Fix listener/pool update * Update the doc link * TVD: LBaaS support * NSX|V3 rename the default availability zone * NSX|v3: Fix create network exception handling * NSX|v3: Add routers availability zones * NSX|v+v3: Support default availability zones * NSXv3: Check cert existance before creation * NSX|V: add HA status to admin list of edges * NSX|V: Ignore empty gw info on router creation * NSX|V3: fix create port with provider security group * Add metadata setting for new created dhcp edge * NSXv3: Refactor LBaaS L7 code * Remove setting of version/release from releasenotes * Updated from global requirements * Retry with stale DB values * NSX|V3: use vmware-nsxlib that does not have neutron-lib * NSX|v validate PG provider networks * NSX|V3: ensure that a readable name is set for LB resources on NSX * NSX|v: Fix provider network creation * NSX-Migration of default security groups * Updated from global requirements * NSXv3: Add validation when attaching lbs to router * use flavors api def from neutron-lib * NSX|V3: enable DHCP and metadata to work with non overlay networks * NSX|V3: allow updating a floatingip without changing the port * DVS: ensure that network is configured if one of more hosts are down * use dvr api def from neutron-lib * Updated from global requirements * use l3 flavors api def from neutron-lib * NSX|v+v3: fix validate network callback * NSX|V3 fix lbaas get plugin code * NSX|V: use elevated context to get external net for router gw * NSX|V: no spoofguard policy for portgroup provider network * NSX|V: fix timeout out issues * use l3 ext gw mode api def from neutron-lib * NSX|V3: use 
contexts 'user\_identity' for the 'X-NSX-EUSER' header * NSX|V prevent adding illegal routes * NSX|V fix exception & typo in vpn driver * Fix devstask doc for service plugins * use router az api def from neutron-lib * NSX-V3| Do not allow adding QoS to dhcp ports * NSX|V3: allow VLAN router interfaces if nsx supports it * Infrastructure support for FWaaS logging * NSX|V: ensure that a session object is created per thread/context * NSXv3: Update tag if lb name is updated * NSX|V: Use flavors for load balancer size * NSXv3: Fix TERMINATED\_HTTPS listener * Cleanup test-requirements * NSX|V3: address edge case of subnet deletion when attached to router * Updated from global requirements * Updated from global requirements * NSX|V3: add request-id support * Fix broken unit tests * use l3 api def from neutron-lib * cleanup unit test usage of api extension maps * NSXv3: Limit tag value to maximum 40 * use extra route api def from lib * DVS: Add support for dns-integration extension * NSX|V3: fix lbaas exception text * NSXv3: Truncate lb name in tag * use addr pairs api def from lib * NSX|V Fix get\_dhcp\_binding output parsing * Logging neutron request-id in NSX * Fix security groups tests * use FAULT\_MAP from neutron-lib * NSX|V: add in security rule tags for 'project\_id' * Fix shared routers locks * NSX|V3: inject X-NSX-EUSER * Fix to use . 
to source script files * NSXv3: Fix load balancer delete issue * use availability zone api def from lib * NSX|V3: use route from nsxlib * NSX|V: honor nsxv.spoofguard\_enabled at port create * NSX|V remove warning on no dhcp edge * NSX|V save backend calls on delete subnet * use external net api def from lib * NSX|V Adding unittests for SG rule bulk creation * NSX|V Ignore port-security at the network level * NSX|V add shared router logs * NSX|V: Support add/remove dvs for VLAN provider networks * NSX|V add more details to vcns debug logs * NSX|V: use session objects for requests * NSX|V: Fix vcns timeout exception * NSX|V Do not share edges between tenants * NSX|V fix exclude list counting * NSX|V3: ensure that DHCP profile is not created for ENS port * NSXv: Recover from LBaaSv2 HM inconsistency * zuul v3 gate changes * NSX|v: fix broken unittests * NSXv3: Update router advertise\_lb\_vip * NSX|V3: Disallow port-security on port/net on ENS TZ * Updated from global requirements * Remove SCREEN\_LOGDIR from devstack * NSX|V3: allow VLAN router interfaces if nsx supports it * NSX|V3: Add DHCP relay firewall rules * NSX|V3 refactor fwaas to support plugin rules * NSX-V: add support for mac learning extension * NSXv3: Fix LB pool algorithm * NSXv3: Change reponse code for L7 redirect and reject * Fix typo * NSX|V Fix warning when disabling network port security * NSXv3: Handle floating ip for loadbalancer VIP * NSX|V3 indentation & typo fixing in client certificate * NSX|V3 make certificate unittests inherit from sql tests * Updated from global requirements * NSX|V3: nsx-provider network bugs fixing * use new payload objects for \*\_INIT callbacks * NSX|V raise error on mdproxy init * NSX|V: check md proxy handler exists before usage * NSX|V: do not build NAT rules for v6 networks * NSX|V3 Add validations to DHCP relay * NSXv3: Change LB rule match type to REGEX * Updated from global requirements * use common constants from lib * NSXv3: Fix loadbalancer stats 
exception * NSX|V: add exception log if router create fails * NSXv3: Fix a typo in LBaaS member\_mgr * NSX|V complete init of fwaas core plugin * Revert "Temporarily disable flowclassifier tests" * NSX|v3: FWaaS v2 support * Updated from global requirements * Remove wrong alias from setup * NSX|V: ensure the allowed logging includes default DHCP rules * Updated from global requirements * Temporarily disable flowclassifier tests * NSX|v3: DHCP Relay support * NSX|v3: provider networks updates * Updated from global requirements * NSX|V: prevent V6 subnet from being attached to a DVR * NSX|V AdminUtil handle orphaned router vnics * NSXv: add timeout parameter for backend calls * NSX|V: Add log messages to retry attempts * Update reno for stable/pike * NSX|V3: AdminUtil updating server ip of md-proxy * NSXv3: Fix L7 rule create/delete base on latest API * Fix to use . to source script files * NSXv3: Update binding if listener has been deleted * NSXv3: Rewrite client certificate provider * NSXv port-binding support * Updated from global requirements * NSX|V3: Do not enable port security on router interface * Nsx admin: Initialize nsx-lib on demand * Updated from global requirements * NSX|v3 use nsxlib in devstack cleanup * NSX|v AdminUtil ignore irrelevant orphaned networks * NSX|V3 Admin utils expect different notFound error * Remove unuse router-bindings constants * NSX|v: Admin Util remove router binding orphaned entries * NSX|V3: ensure that update port does provider validations * NSXv3: Refactor LBaaS L7 based on API change * Add NSXv3 LBaaS driver config for devstack * NSXv: Implicitly disable port-security for direct vnic-type ports * NSX|V3: enforce provider rules not being set when not port sec * NSX|V: keep availability zones on router migration * Updated from global requirements * Fix port create with mac learning set to false * Tag the alembic migration revisions for Pike * AdminUtils:NSX|V3: Add orphaned routers list & clean * Updated from global 
requirements * NSX|V: remove unuse dmethod \_get\_sub\_interface\_id * NSX|V fix error message when changing port security * NSX|V3: validate transport zone at provider net creation * NSX|V: ensure locking when removing a network from DHCP edge * NSX|v3: add network/port description to backend objects * NSXv3: Prevent router deletion if it has lb attachment * NSXv3: Refactor LBaaS driver to fix binding issue * Updated from global requirements * Don't add provider security-group when psec is disabled * NSX|v3: do not allow setting router admin state to False * NSX|v3 plugin: Fix typo * NSX|v3: process the port security of the dhcp ports * NSX|v: get internal net by az fix * Remove vmware\_nsx\_tempest * NSXv3: Add new tags for LBaaS resources * NSX|v: Handle address scope change on subnetpool * NSX|V3: ensure that MAC learning does not invoke switch profiles * NSX|v: fix deletion of edges in PENDING\_UPDATE * NSX|V: skip interface actions on old lbaas member create * NSX|V: serialize rule removal * NSX|V: ensure that segmentation ID's are unique * NSXAdmin: Fix nsx-v gw-edge deploy witho no default-gw * NSX|v3: configure additional switching profiles per AZ * NSX|V and NSX|V3: remove deprecated config variables * NSXv3: Remove os-lbaas-lb-id tag for lb service * NSXv3: Add release note for LBaaS * Add in support for direct-physical vnic types * NSXv3: Fix deletion issue when listener has pool * Add Pike release notes * NSX|v+v3: forbid multiple fixed ips in a port * Admin-Utils NSX|v3: Fix constants and typo in cert util * NSX|V: skip metadata proxy subnets for BGP updates * NSX|V: autodraft does not require a NSX reboot * NSX|V3: fix trunk issues * NSX|V: make use of granular API for getting DHCP binding * NSXv3: Delete lb binding after pool deletion * NSX|V: save PUT when restarting neutron * NSX|v: lock shared routers interface actions * NSXv3: Update func to add virtual server to service * AdminUtil|NSX-v: complete plugin init * Admin-Utils|NSX-v: dhcp recreate 
fix * NSX|v: add device id to dhcp ports * NSX|v3: do not allow provider sec groups if not port-sec * NSXv3: Add admin utils for LBaaS resource * NSX|V: ensures updates on subnet are atomic * NSX|v3: disable port security on dhcp ports * AdminUtil|NSX-v: complete plugin init * use neutron-lib address scope apidef * NSXv3: Handle address scope change on subnetpool * use dns api def from neutron-lib * NSX|V3: ensure that subnet update takes host routes into account * NSXv: LB objects delete failure while pool binding missing * FWaaS: remove deprecated exceptions * Unblock gate unit tests * NSX|V3: don't fail on already deleted network/port * use qos constants from neutron-lib * NSX|V: remove invalid dvs-id validation * use qos DriverBase from neutron-lib * NSXv3: Add Neutron LBaaS Layer7 Support * NSXV: use correct exception for NoResultFound * NSXv: locking DHCP * NSXv DHCP locking refactor * NSX|V: ensure that router updates are atomic * NSXAdmin: Fix default gateway setting when creating BGP gw-edge * Admin util: add not for DHCP and metadata native support * NSX|V3: ensure that DB binding is updated if there is an IP change * Tempest: Fixed a bug # 1914831 * Updated from global requirements * Updated from global requirements * NSXv BGP: Fix bgp-peer esg-id validation * NSX|nsxadmin install update * Updated from global requirements * Removing qos test scenarios due to pyshark removal from test-requirements.txt. 
Will add these tests back with new tempest design and workaround for pyshark * Remove pyshark from test-requirements * NSXv: Fix method name typo * Discard east-west traffic between different address scopes * NSXv3: Neutron LBaaS nsxv3 support * NSX|v: refactor shared router FW rules creation * NSXV: Fix default ICMPv6 firewall rules * NSX|V: enable plugin to decide on VLAN tag * NSXT instance migration: Improve logging * VMware-NSX:add install doc command * NSX|V3: ensure neutron raises better exceptions * Enable admin or owner to configure snat * Local copy of scenario test base class * NSX|V3: admin utility get ports to skip Qos read * NSX|V: ensure no sec groups if network port security is disabled * NSX|V3: honor host routes * Tempest: Added new design for tempest test cases * NSXv3: Move away from locking in cert provider * NSX-migration: Add logging and handle errors * Tempest: tempest.test.attr() is deprecated. Moving to new function * NSXV: ensure that binding update does not fail if deleted * NSX|V3: Configure TZ, router and profiles using tags * NSX|v: Fix error on service insertion config * NSXT instance migration: support file logging * NSXv: Support ipsec VPNaaS on nsxv driver * NSX migration fix nosnat + keep ips * NSXv: Add a configured delay after enabling ECMP on edge * NSX|V3: support ranges in fw rules ports * NSX|V: support large port ranges in service insertion * NSX|V: support big ranges in fw rules ports * NSX|v+v3: Use elevated context for address scopes checks * NSX|V: treat edge cases with edge deletions * NSX|v: handle old loadbalancers interfaces * NSXv: Backup pool locking * Use flake8-import-order plugin * NSX|V3: devstack cleanup exclude list ports on devstack * [Tempest]: Adding of 'plr' attribute for distributed routers * NSXv3: Add lock around filename in cert provider * NSX|V3: fix devstack when native DHCP is False * Added compatibility to pyroute2>=0.4.15 * NSX|v3 refactor trunk driver to use nsxlib api * AdminUtils NSX|v: 
recreate router by Id * NSX|V: Support QoS ingress rules * NSX|V3: clean up parent/tag handling * NSX|v: Add FW rules for same scope subnets * AdminUtils NSX|V: router recreate fix type check * [Tempest] spoofguard test fix from OpenStack plugin change * NSX|V: do not fail distributed router deletion * NSX|V3: Do not add SNAT rules if same address scope * NSX|v3: Enforce address scopes for no-NAT routers * AdminUtil NSX|v: Fix dhcp recreate * NSX|v+v3: Fail if adding another project router to FWaaS * NSXv BGP: Fix bgp-peer esg-id validation * NSX|V3: ensure that devstack cleanup delets switches * NSXv: use regular DHCP edges for VDR metadata * NSX|v3: Add firewall tag to the router * NSX|V: Do not add SNAT rules if same address scope * Revert "Remove neutron-fwaas exception usage" * [Tempest] NSXV L2GW cleanup fix * {Tempest]: Changes done while updating port with PSG * Fix a few pep8 errors in db.py * Remove neutron-fwaas exception usage * Create base plugin for common nsx-v/nsx-v3 code * NSX|V: only update host groups if AZ correctly defined * NSXv: LB pool delete failure while binding missing * NSX|V: delete old pending-update/delete edges * NSX|V3: fix trunk initialization * Remove white space * NSXv: Don't allow security-group in no port-security * [AdminUtil] NSX|v3: Update router NAT rules to not bypass the FW * NSX|V3: Support QoS ingress rules * NSX|V3: enable creation of provider VLAN network * NSX|V: make sure host groups updated only once * NSX|v AdminUtil: Fix edges utilities with az * use service type constants from neutron\_lib plugins * NSXv BGP: Fix shared router on gateway clear * NSXv BGP: Fix password value when not specified * NSX|V: prevent a floating IP being configure on a no snat router * NSX|V3 Fix FwaaS rule with no service * Remove new unsupported unittests * NSX|v: disable service insertion if no driver configured * NSX|V3: Warn if backend does not support FWaaS * [NSXV]Fix l2gateway creation failure * NSX|v+v3: Support default qos 
policy * NSX|v: Fix LBaaS session persistence * NSX|V3: Use QoS precommit callback to validate rules * NSXv3: Add util to check version 2.1.0 * NSX|V: Keep existing members when updating LBaaS pool * NSX|v: refactor FWaaS driver * NSX|V3: FWaaS-v1 support * NSX-V3| fix devstack cleanup * NSXv3: Race condition fix for cert provider * Address OVO breakage * [Tempest]: Adding of scenario cases for FWaaS * [Tempest]: Adding sleep between PSG creation and adding to backend * Fix NSX|V3 unit tests * NSXv AdminUtil: Final touches * Fix devstack doc titles * NSX|V: Fix FWaaS exceptions * NSX|V: Call NSX backend once for dvs validations * NSX|V raise error when FWaaS uses unsupported routers * NSX|V AZ validation message * NSX|V: Validate AZ HA configuration * NSX|V: Fix LBaaS session persistence without cookie name * NSX|V: Validate availability zones dvs-ids * NSX|v: call backend scoping objects only once during init * Tempest: Fixed failed few tempest scenario test cases * NSXv3: More locking for certificate provider * Fix NSX|v3 qos unit tests * NSX|V: use the same DHCP if multiple subnets on same network * NSXV3: harden subnet creation for external networks * NSX|V: honor provider security rules * QOS: fix unit test breakage * NSX-T Migrate: migration script for libvirt * NSX-migration: remove physical-network for flat networks * NSXv BGP: Fix get\_bgp\_peer 'esg\_id' attr visibility * NSX Migration: support keystone v3 & other fixes * LBaaS: Share lb\_const module for nsxv and nsxv3 * use attribute functions/operations from neutron-lib * NSXv BGP: Adding IP address check for ESG BGP peer * NSXv3: Solve race condition in DB cert provider * NSXv: Fix before delete notification for shared rotuer * NSXv Admin util: BGP GW edges deployment and configuration * use core resource attribute constants from neutron-lib * NSX Admin: Fix plugin identification * LBaaS: Share base\_mgr between nsxv and nsxv3 * NSXV3: ensure that devstack does not get invalid config * BGP unittests * 
NSX|v: Fix LBaaS session persistence * NSXv BGP: Return with error for invalid bgp-peer-remove request * NSXv BGP: Raise an error if user add duplicate networks or peers * NSXv BGP: Add policy rules * Skip DHCP options tests in v3 tempest * NSXv BGP: Use elevated context inside callbacks * NSX|V: Support tftp-server dhcp option * NSX|V: Skip md-proxy routers in fwaas * Tempest: Tempest test NSX security groups failing * Fix nsx-migration script * NSX|V fix crash when enabling subnets dhcp * NSXv3: Fix devstack issue on compute node * NSX|v: keep snat status when changing router type * [Tempest]: Adding of more cases for FWaaS * Switch to SUBNET from SUBNET\_GATEWAY * NSX|V: Fix broken unit tests * Fixes vmware\_nsx\_tempest tempest plugin issues with tempest * Tempest: Port Types network cleanup fix * NSX|V3: ensure that network rollback works correctly * NSX|V: ensure that the subinterface validations are atomic * Update fwaas driver in devstack.rst * Use vmware\_nsx aliases for neutron core plugins * use extra\_dhcp\_opt api-def from neutron-lib * Add firewall\_drivers entry\_point in setup * NSX|V: Fix use case with no FWaaS for a router * use is\_port\_trusted from neutron-lib * NSXv BGP: Use BGP peering password correctly * NSXv BGP: Allow BGP only on networks with address-scope * NSX|V: ensure that FLAT provider network is deleted * NSXv: Enforce address scopes for no-NAT routers * AdminUtils: Fix crash in nsx-v router-recreate * AdminUtils: Fix security-group migrate * AdminUtils: Fix firewall-section list of operations * [Tempest]: Adding of FWaaS api testcases * Policy: enable distributed router to be created by all * NSXV/L2GW: validate that portgroup is used only once * use plugin constants from neutron-lib * NSXv Admin: Print usage if no properties are given for bgp-gw-edge * NSX|V: LOG.exception only when there is an exception * AdminUtils:NSX|V: Add orphaned networks list & clean * Use oslo\_utils to mask password in logs * Remove pbr warnerrors in 
favor of sphinx check * NSXv BGP: Fix KeyError on not an ESG bgp-peer * NSXv: Adding missing devstack configuration for BGP * NSXv Admin: Fix gw-edge firewall config * NSX|V: Do not add NAT rules in router firewall with FWAAS * NSX|v AdminUtil: Fix bgp-gw-edge create with az * NSX|V3: ensure 'exclude' tag is correctly set * Unbreak gate again * Address gate issues * NSX|V: fix missing spoofguard ID validation * NSXv: Mask passwords when logging debug messages * NSX|v: Add fip to exclusive router after migration * AdminUtils:Fix NSX-v metadata secret * Add vmware\_nsxv entry\_point in setup * NSX|V3: treat missing exclude list entry on delete * Rename api-replay to nsx-migration * Fix policy file breakage * Tempest: 2 scenario tests are fixed * NSXv BGP: Add more unittests * Split and move policy rules to policy.d dir * NSX|V: prevent deadlock with subnet creation and deletion * NSXv: add lbaas statistics support * NSX|V: fix vnic allocation for AZ and metadata * NSX|v: Distributed router type update failure * Tempest|DVS: Add \_list\_ports for dvs scenario test case * NSXAdmin: Block cert commands when feature is off * [Tempest]: Adding of removed method from upstream in vmware\_nsx\_tempest repo * Address network filtering issue * NSXv BGP: Fixing get-advertise-routes * AdminUtil:NSX|V3: change metadata server * AdminUtil:NSX|V3: Fix plugin calls * Basic QoS scenarios: Testing bandwidth-limit, DSCP rule with traffic root@prome-mdt-dhcp412:/opt/stack/tempest# python -m testtools.run vmware\_nsx\_tempest.tests.scenario.test\_qos\_ops Tests running... 
tempest/clients.py:45: DeprecationWarning: Using the 'client\_parameters' argument is deprecated client\_parameters=self.\_prepare\_configuration()) Warning: Permanently added '172.24.4.9' (RSA) to the list of known hosts * Update changes for \_get\_marker\_obj * NSX-v3| fix delete-router when there is no backend id * NSX-v| Fix FWAAS rules in DB * use neutron-lib port security api-def * Update code to work with oslo.config enforcements * Fix OSC client to work with versions greater than 3.10 * NSX|V3: admin util for migrating exlcude list ports * Fix client breakages * use neutron-lib constants rather than plugin constants * Correct config help information error * NSX|V3: Support specific IP allocations in IPAM * OSC 3.10 integration * [Tempest]: Changes done in allowed address pair scenrio testcases * [tempest]: Changes done for port security scenario testcases * NSX-v+v3| remive unused QoS definition * Tempest: Adding network config param * Tempest: OpenStack Port Types Support API tests * Adding pyshark requirements for QoS scenario testing * Fix gate jobs * [Tempest]: Adding "Prevent NSX admin from deleting openstack entities" testcases Incorporated nsxv3\_client.py for backend operations Made changes to nsxv3\_client.py for specific reqests Test results: root@prome-mdt-dhcp412:/opt/stack/tempest# python -m testtools.run vmware\_nsx\_tempest.tests.nsxv3.scenario.test\_client\_cert\_mgmt\_ops.TestCertificateMgmtOps Tests running... 
tempest/clients.py:45: DeprecationWarning: Using the 'client\_parameters' argument is deprecated client\_parameters=self.\_prepare\_configuration()) tempest/scenario/manager.py:50: DeprecationWarning: Read-only property 'manager' has moved to 'os\_primary' in version 'Pike' and will be removed in version 'Ocata' cls.flavors\_client = cls.manager.flavors\_client tempest/test.py:376: DeprecationWarning: Read-only property 'os' has moved to 'os\_primary' in version 'Pike' and will be removed in version 'Ocata' if hasattr(cls, "os"): * NSX|v3: Use nsxlib features list * Adjust qos supported rules to Neutron * NSX|v: Support more than 2 hostgroups * NSX|V: configure correct physical\_network * NSX|V: enhance error message for invalid scope\_id * NSX|V3: treat DHCP server max entries * NSX|v fix some host group placement issues * NSX|V3: Fix exclude port issue during delete port * NSX|V fix FWaaS rules order when router is added to FW * NSX|V Fail dist router set gw if edge not found * NSX-V| add IPv6 link-local address to spoofguard * Tempest:Deploy and Validate Neutron resources using HEAT Template on NSXT|V * NSXv: Use BGP protocol to learn default gateways * NSXv3: Default native\_dhcp\_metadata to True * NSXv: Fix validation for bgp peer 'esg\_id' attr and peer removal * AdminUtils: Fix NSX-V dhcp-edge recreate * NSXV3: ensure all OS ports are added to default section * Stop using CommonDbMixin apis * NSX|V3: fix issues with exclude list * NSXV3: ovs bridge was not getting created in restack * NSX|V: ensure that monitor ID is persisted with LB alg update * NSX|V: enable an external network to create backing network * NSX-V3| Integrate with nsxlib refactored code * Validate L2gateway exists in backend * Update following tempest changes * NSX|V: Ensure that 6.2.x can start with transparent vlan config * NSX|V3: fix exclude list initialization * NSXv BGP: Update edge bgp identifier on router GW updates * NSX|V: provide admin utility to update default cluster section * 
NSX-V3: Fix qos code in plugin * Use neutron-lib callbacks module * NSX-V| Fix FWaaS rules order * Fix broken unit tests * Revert "NSXv: Don't remove default static routes on edge" * NSXv: Don't remove default static routes on edge * NSXv: Adding notification for router GW port update * NSXv BGP driver: Add missing log expansion variable * NSX|V: be able to deal with more than 256 edges * NSXv: Notify on router migration before removing it from edge * NSX-V| Fix FWaaS deployment on distributed router * NSX|V: fix distributed router interface deletion * Integration with new neutron code * [Tempest]: Adding of timer in between backend operations * [tempest]: Adding of missing function in vmware\_nsx\_tempest * Tempest: Removed old and unused folder * NSXv, NSXv3: Enable address-scope extension * Fix transaction issues with network/subnet facade updates * NSXv BGP support * NSXv: Adding more rotuer driver notifications * VMware:vmware-nsx release note update * NSX-V| Fix Fwaas handling ports * NSX-V| Fix exclusive router deletion * Fix api-replay unittest teardown * NSXv: Adding notifications for router service edge events * NSX|V: check edge ID before locking edge * [Tempest]: Reusing Lbaasv2 cases for nsxv3 plugin also * NSX-V FWaaS(V1) support * NSXv3: Force delete router * NSXv3: Hide client auth password * Fix unit tests * Fix NSX-V qos tests * NSX|V: add in support for DHCP options * Use new enginefacade for networks, subnets * [Tempest]: Reusing Lbaasv2 cases for nsxv3 plugin also * NSX|V3: fix bulk subnet breakage * Tempest: Fixed scenarios for SSH authentication failures * Tempest: Fixed SSH authentication failures * [Tempest] Deploy and Validate Neutron resources using HEAT Orchestration Template * AdminUtils NSX-V| fix sections reorder * NSX-V3| fix devstack cleanup * Drop log translations * NSX-V| Adding datacenter to availability zones config * Fix some reST field lists in docstrings * Use neutron-lib provider net api-def * Prevent non-admin user 
specifying port's provider-security-groups * Remove logging leftovers * Tempest: Fix for test\_mdproxy\_with\_server\_on\_two\_ls test case * Removing irrelevant note in README file * Skip spawn plugin init methods in unittests * NSX-V3| fix unittests mock * Tempest: Fixed TestRouterNoNATOps bugs and enhanced the test cases * NSXv3: Always clean client certificate in devstack * NSX|V: sync firewall with addition of new ports * NSX-V3| Fix AZ when native dhcp is disabled * NSX-V| improve AZ validation * Updated from global requirements * NSX-V3: add transport zones to availability zones * Remove Tap-as-a-service Support * Tempest: Removed skip test from test\_nsx\_port\_security.py * Tempest: Device driver does not allow Change of MAC address when interface is UP * [Tempest]: Adding of removed method from upstream * NSX-V3: Fix QoS delete * NSXv3: Add support for secure metadata-proxy access * Update api-replay for nsx-v->nsx-v3 migration * NSX-V3| network availability zones support * NSX|V: fix \_vcm parameter * Tempest: test.idempotent\_id is deprecated * Tempest: NSXv3 Logical resource get query cursor fix * Skip configuring integration bride on ESXi compute * [Tempest]: Modified QoS API tests * NSX|V: add in exclusive DHCP support * NSX|V: ensure correct parameter is passed * Fix subnet-deletion issue * NSXv3 Admin: Multiple client certificate support * Fix unit test that uses get\_random\_mac * NSXv3: Add certificate expiration alert * NSXv: Subnet create and attachment concurrency * Pass dhcp\_client in renew\_lease() * Fix OSC plugin global declaration * NSXv: Fix tempest test failures due to KeyError 'primaryAddress' * Fix IPAM unittests * [Tempest]: Adding subnet-pool api testcases * [Tempest]: Added scenario cases for port security feature * Tempest: change test. 
to decorators.idempotent\_id and addCleanup() * Generalize the availability-zones code * NSX|V: fix host group exception * Remove unconstrained for vmware-nsxlib * AdminUtils: Add utility for config validation * [Tempest]: Changes done to add missing methods from upstream * [Tempest]: Added api cases for port security feature * [Tempest]: Add MAC learn API neg test - port security * Use vmware-nsxlib from master * Use neutron-lib's context module * NSX|V: ensure that DVS name is unique * Fix admin-utils unit test * Tempest: admin-policy scenario basic operation test * NSX|V3: skip random failing test * NSX-T: nsxadmin UTs * NSX-T: Rewrite client certificate unit tests * Switch using exec\_command() directly * Updated from global requirements * NSX|V Fix lbaas l7 reject action * Tempest: Moving from test.idempotent\_id to decorators.idempotent\_id * Updated from global requirements * NSX|V3: Use client cert provider in nsxlib config * Updated from global requirements * NSX|V: remove leftover code from md\_proxy * Updated from global requirements * NSX|V: move migration to correct folder * Fix admin utils unit-tests * Fix DB breakages * NSX-V3: Add support for dhcp-opts extensions for ports * NSX|V: delete old pending-create edges * NSX-V: Improve DHCP edge firewall rules * NSXv3: Rename parent\_tag to traffic\_tag * Fix LBAAS L7 policy upgrade * Enhanced unittests for admin utils * AdminUtils NSX|V: Fix rotuer recreate utility * NSX|V: Support changing the position of LBAAS L7 policy * DVS: Add support for 'direct' vnic types * Don't use Tempest internal methods * NSX|V: add support for VLAN trunk with VLAN network * AdminUtil NSX|v Fix constants import * Switch to oslo\_log * AdminUtils NSXv3: Fix SG admin utils and their documantation * [dvs] Enable vlan-transparent extension * NSX|V: Add support for 'direct' vnic types * NSX|V: remove skipped transparent vlan test * Fix typo in unit test * NSXv| Fix path comparison in lbaas L7 rules * NSX|V: serialize rule 
creation * NSX-MH: Remove failing unit tests * AdminUtil|NSXv: Add az & db status to edges utils * NSX|V remove plugins vdn\_scope member * Refactor DvsManager code * NSX|V: ensure that the DRS is 'should' and not 'must' * NSX-V3| Qos without RPC notifications * Fix ipam table primary key constraint * NSX|V: conly create host groups is ha is enabled * NSX|V: add in a cleanup method for host-groups * NSX|V: validate that entries exist * NSX|V: enhance admin utility * Remove redundant pass in tests * Replace db get\_session with get\_reader/writer\_session * NSX|V: improve host group management * Fix to use correct config options for image\_ssh\_user * NSX|V: only update host groups for edges that are not distributed * Tag the alembic migration revisions for Ocata * NSX|V: add support for host groups for DRS HA * NSX|V: transparent support for virtualwires * NSX: Add devstack.rst to contain all devstack config * NSXv: Add metadata configuration to the availability zones * Edit config variables in README for TaaS nsxv3 driver * [Tempest]: Adding of Allowed address pair scenario cases * Updated from global requirements * NSXv: LBaaS default FW rule should be accept-any * NSXv: Connect LB interfaces to member subnets * [Tempest]: Added Provider Security Group cases for nsxv * Prepare for using standard python tests * Tempest: Scenario tests for Disable spoofgurad with NSXv * [Tempest]: Adding of Provider security Group cases * Remove support for py34 * NSX|V: add more locks and cleanup edge bindings * NSXAdmin: Add parameters to certificate generation * NSX-v| LBAAS L7 support * Use https for \*.openstack.org references * NSXv: Remove router dependency for LBaaS * NSXV3: Client certificate private key encryption * NSXAdmin: add import and nsx-list commands for client cert * NSXv: Add backup pools ranges to each AZ config * NSX|V: ensure that static bindings are consistent * Fix typo in README.rst * Remove redundant import * NSXv: Fix update port update with provider 
security-groups * NSXv: Add unit tests for md\_proxy * Stop sending notifications for router update/delete * NSXV3: Initial client certificate auth support * Updated from global requirements * [Admin-Util] NSXv: fix plugin issues * NSXv: Fix backend error handling * NSXv: Edge random placement * NSXv: New way to configure availability zones * NSX|V: remove deprectaed vcns section * NSXv: Support update dvs list for VLAN provider networks * Fix cleanup prints * NSXv: Fix dist router call to add fw rules * NSX cleanup script to clean only related resources * NSXv| Use the current DVS when creating a teaming policy * NSX|V: fix broken unit tests * NSXV3: ensure that mac learning enabled has port sec disabled * [Admin-Utils] delete all backup edges * Tempest: Scenario tests for Provider security group with NSXv3 * NSXv: Fix pool logging calls * NSX|V: set bind\_floatingip\_to\_all\_interfaces to False by default * [Admin-Util] NSX-V|Reorder L3 firewall sections * Remove psutil dependency * NSXv: Subnet DHCP enable/disable with VDR * Updated from global requirements * NSX-V| Fix policy SG errors * NSXv: Do not lock RPC filter update * NSX-V| add firewall rules to dhcp edge * NSXv: lock DHCP edge while making changes * Tempest: Changes done in dhcp\_121 for bug#1797152 * NSX|V: only do SNAT traffic per interface for specific IP's * Use neutron-lib portbindings api-def * Fix vmware\_nsx tempest plugin * Updated from global requirements * Use neutron-lib provider net api-def * Ignore specific backend error for invalid identifier * NSX|V: ensure backwards compatibility for vdr\_transit\_network * NSX-V3| Do not allow adding QoS to router ports * NSX|V: ensure that FW rule updates on edge are locked * NSX|V: add in missing lock(s) * NSX-V| update port port-security flag in all cases * NSX-V3| Fix qos switching profile project name * H402 hacking have been deprecated * NSXv: Reduce DB calls while gathering network edges * NSXv: return subnet edges only when requested * NSXv: 
recover from database conflicts for VDR DHCP * NSX|V: ensure that metata port cleanup is done * Updated from global requirements * NSX-V3| Validate Qos burst size before rule creation * NSX|V3: ensure that port security is set correctly on backend * Remove pagination skipped tests * Updated from global requirements * Fix QoS tests to use project ID * NSXV+NSXV3: Add support for dns-integration extension * NSX-V3| IPAM support for subnet update * NSX-V| prevent rules creation for SG with policies * Add in skip for breaking test * NSXV+NSXV3: add support for pluggable extensions * NSX|V3 IPAM support * Fix router extra attr processing * NSX|V: set teaming standby ports * NSX-V| Prevent port creation with an existing MAC * Updated from global requirements * NSX|V: ensure that a provider portgroup can be attached to a edge * Fix IPAM drivers entry point in setup.cfg * NSX-V| Fix SG creation with nsx policy * NSX-v3| Update router description on backend * Updated from global requirements * Remove references to Python 3.4 * Using sys.exit(main()) instead of main() * NSXV3: invoke get\_connected\_nsxlib only once per invocation * NSXv3: Allow running devstack without installing OVS on it * NSX-V| Validate default policy configuration if use\_nsx\_policies * Updated from global requirements * NSX|V3 refactor plugin profiles init code * NSX|V: learn the default L3 section ID * Updated from global requirements * NSX|V3: prevent a floating IP being configure on a no snat router * NSXV devstackgaterc file * Use CORE from neutron-lib * Fix TODO in vnic index tests * Fix pep-8 warning of long line * QoS Config: add minimum value of 1.0 to qos\_peak\_bw\_multiplier * Replace subscribe with register for rpc callbacks * Add oslo.privsep and pyroute2 to test requirements * Updated from global requirements * Replace "Openstack" with "OpenStack" * NSX|V Fix router resize for older NSX versions * NSX|V remove security group from NSX policy before deletion * Fix baremetal config options 
in Tempest plugin * Updated from global requirements * NSX|V3 add default gateway to static binding * Fix firewall rule to allow ping on DHCP edge * NSXv: LBaaS enable acceleration for TCP listener * Tempest: Added MDProxy scenario test cases * Tempest: API tests for Provider security group with NSXv3 * NSXv: LBaaS driver should not maintain member FW rule * NSX|V: do not connect DVR to DHCP edge if not DHCP enabled * Add missing space in config help * NSX|V add RPC listeners endpoints * NSX|V: add configuration variable for dns\_search\_domain * NSX-V: Add support for log level in router flavors * Admin-Util: Create a NSX-v DHCP edge for a network * NSX-V3: Handle pagination in devstack cleanup * NSX|v add IPAM driver to setup.cfg * Populate plugin directory to fix port operation in nsxadmin v3 * Admin-Util: Delete NSX-v backup edges by name * NSX|V update router edge when gateway subnet changes * NSX|v+v3 handle provider sgs in create port sg list * NSX|V: improve support of bulk subnets * Modify MH\_tests Old style class definition * Add edge syslog configuration support to router flavors * Tempest: API tests for MAC Learning with NSXv3 * Tempest: admin-policy API test cases * NSX|V3: fix issues with disabling port security * Create NSGroup for port exclusion * Updated from global requirements * Remove NSGroup manager unit tests * Ignore NotFound response when deleting firewall rule * NSXv: Plugin name constants file invalid * NSX|v fix get\_network\_availability\_zones method signature * NSX-Admin: Add ability to configure loglevel on edges * NSX|v fix security-group policy validation * NSXv3: Removing the use of ns-group manager * NSX|V3: delete DHCP port prior to deleting subnet * NSXv3: Use neutron\_port\_dhcp\_profile for DHCP ports * Remove vim header from source files * NSX|v+v3: Allow multiple provider security groups on port * Using assertIs(Not)None() instead of assert(Not)Equal(None) * Add Apache 2.0 license to source file * NSXv: retry call to 
create\_port base method * NSX|V3: ensure that latest devstack works * NSX-Devstack: Install vmware-nsxlib from git * Use ExtensionDescriptor from neutron-lib * Admin utility: add in ability to update edge reservations * NSXv3-Admin: fix migrate-nsgroups-to-dynamic-criteria * NSX|V: ensure that sub interface is cleaned up when disabling DHCP * NSX-V add nsx-policies extension * Modify use of assertTrue(A in B) * Using assertIsNone() instead of assertEqual(None) * Remove white space between print and () * Use DB field sizes instead of \_MAX\_LEN constants * NSX-Admin: Support syslog configuration for edges * NSX|V: fix broken unit tests * NSXv: Resume router port delete when edge missing * NSX|V3: ensure that the NSX port name is updated correctly * Don't include openstack/common in flake8 exclude list * NSX|V3: only configure mac-learning if necessary * NSXv: Log an error on DB inconsistencies * NSX|V: configure DVS for devstack if needed * Cosmetic change in unit test * Tempest: Providing support for tenant-delete feature * MH plugin tests - remove directory.\_instance * NSXv3: Change default metadata route to 169.254.169.254/31 * Integration with neutron-lib plugin directory * Add None protection for router and vnic binding objects * NSX|V3: ensure bridge is created if octavia is running * Tempest: Add east west scenario test * Make vmware-nsx capable of handling depends-on * By default, add floating IP NAT rules to each vnic on router * [NSXv3]: Turn off psec for L2GW ports * NSX|V: ensure correct teaming for port groups * NSX|V: cache NSX version number * Tempest: Remove skip decorator for security group * NSX|MH: Fix \_update\_fip\_assoc due to upstream change * Tempest: Fix tenant\_id for nsxv scenario test * Removed redundant 'the' * NSXv3: Fix a package import * NSX|V: admin utility - add in missing teaming values * NSX-V3: Fix security-group logging * Updated from global requirements * Nsxv3: Add admin utility to clean orphaned DHCP servers * NSXv: Make 
VDR transit net configurable * NSX|V3: update DHCP static bindings when DHCP is enabled * NSXv3: Fix an exception typo * Tempest: TaaS Client for Tap Service and Tap Flow * [Admin-util] NSX|V admin util to use a policy in a security group * NSX|V do not update SG logging if SG has a policy * NSX|V: add in locks for DHCP binding updates * Tempest: Remove deprecated tenant\_id * NSXv3: Catch backend failure during delete\_subnet * Add release notes for NSX-V policy support * NSX|V policy: get SG description from the policy * Add security group policy extension to OSC * Add security group extensions to OSC * OSC integration - port extensions * NSX|V: all calls to NSX are sync - no need to check running jobs * NSXv: concurrent subnet creation bugfix * NSXv3: Add error handling for SQL timeout case * NSXv3: Fix DHCP upgrade script * NSX|V: fix typo * NSXAdmin: Ignore response codes when issueing a delete fw section req * Updated from global requirements * Use neutron\_lib converters instead of neutron * NSX|V support security groups rules with policy configuration * NSX|v+v3: Allow multiple provider security groups per tenant * NSX|V - initial support for NSX policy * Use L3 constant from neutron lib * NSX|V3: fix path for exceptions * Updated from global requirements * NSXv3: Native DHCP is not supported for non-overlay networks * Integration with nsxlib * Updated from global requirements * NSX|v QoS fix DSCP rule creation * NSX|V QoS fix shaping direction * Updated from global requirements * Prepare for neutron-lib DB support * Fix osprofiler breakage for admin utils * NSXv3: Fix checking DHCP switching profile in admin utility * NSX|V: fix validations for non-ascii characters * OCS plugin + initial extensions support * NSX|v+v3: QoS BW translations should be rounded * Updated from global requirements * Use Port list type in the NSX configuration * Remove deprecation warnings * Raising proper error in case of router-interface addition * Updated from global 
requirements * NSXv3: Clean up pending entries when create\_router failed * Tempest: Add router NoNAT scenario tests * NSXv3: Enhance exception handling in create\_subnet\_bulk function * Updated from global requirements * tempest: lbaas l7-switching API tests * Tempest: NSXv3 Native DHCP Negative Test * Use compare\_elements from neutron\_lib * NSX: remove depracted DB warnings * NSX|V3: ensure race is prevented when attaching network to router * Tempest: Add tests to cover Native DHCP * tempest: lbaas l7 switching scenario tests * NSX|V3: ensure that the mac learning profile exists * NSXv3: Fix allowed address pairs switching profile * NSXv3: Create logical router after neutron router is created * Fix update dhcp bindings * NSXv3: Fix mac learning init bug if nsxv3 is 1.0.x * NSX|V3 fix nsxlib raised error with managers * NSX|v3 replace dhcp profile and metadata proxy uuids with names * Stop adding ServiceAvailable group option * NSXv3: Add plugin-specific create\_subnet\_bulk function * Include alembic migrations in module * Updated from global requirements * Enable release notes translation * NSXv3: Fix MAC Learning Profile POST API * NSXv3: Fix typo in cluster reinitialization * NSX|V3: enhance exception handling * devstack: fixed backend cleanup during unstack * NSX|V3 update client with max attempts * NSX|V: validate that a flat network is configured correctly * NSX|V: remove unused parameter * NSX|V: add context to missing configure\_router\_edge * NSX|V: add missing contexts * NSXv3: Fix NSGroupManager initialization test * Replace retrying with tenacity * Updated from global requirements * nsxlib refactor: config + neutron deps * nsxlib refactor - add hierarchy to the nsxlib apis * nsxlib refactor - remove cfg usage * Updated from global requirements * nsxlib refactor continue * Tempest: Change parameters to accommodate to tempest master * Fix broken flow classifier tests * Tempest: router\_size create and update tests * Tempest: Update nsxv3\_client to 
query more than 1k * Tempest: Add Native DHCP UniScale Tests * Add native DHCP config in nsxv3 sample local.conf * Updated from global requirements * NSX|v AdminUtil list dhcp-bindings on missing edge * NSXv3: Fix attachment setting during create\_port and update\_port * Tempest: Add back addCleanup\_with\_wait * NSX|v: Fix shared router clear gateway * Remove deprecation warnings * Fix broken unit tests * NSXv3: Fix problem when reset lport attachment * TrivialFix: typos in cli.py * TrivialFix: typos in client.py * NSX|V3 support different credentials for the NSX manages * Tag the alembic migration revisions for Newton * Add api-ref in MD format * NSXv: Change metadata port when router is deleted * NSX|v fix router migrate with metadata * Updated from global requirements * NSX|V3: ensure that octavia ports receive DHCP addresses * NSX|V3: Fix update\_subnet issue * NSX|V3: Delete DHCP binding if nova instance is deleted * NSXv: DHCP reconfigure on VDR interface removal * NSX|V3: ensure Mac learning has port security disabled * NSXv: create worker pool on new context * NSXv3: Don't advertise NAT routes in NoNAT case * Fix broken unit tests for python 3 * NSX|V3: Add retry logic for deleting logical router port * Updated from global requirements * Add release note for native DHCP/Metadata support * NSXv: Configure metadata when detaching from VDR * NSX|MH: add in deprecation warning * Fix broken unit tests * [Admin-util]: Add support to update resource pool ID for NSXv edges * NSX|V improve validate\_network performance * Fix test\_migration import * Adding release notes for new feature - provider security-groups * Fix broken unit tests - add project\_id * Tempest: Fixed error with nonexist module * NSXv3: Add support for trunk service driver * Remove deprectaion warnings for db models * Update reno for stable/newton * NSX|V3: check if subnet overlaps with shared address space * Updated from global requirements * Add releasenotes for NSXv3 TaaS driver * NSX|V fix 
IPAM driver log message format * Add release notes for the Newton features * NSX|V: make DHCP DB binding creation more robust * Fix Admin utils tests - resources registration * NSX|v: do not resize a router if not necessary * NSX|V remove duplicate log message at edge\_utils * NSX|V: ensure that log message does not cause exception * NSX|V fix router\_binding az default value after migration * NSXv: use contexts correctly while using threads * [NSXv3]: get\_floatingips filter must pass a list of ports * NSX\_V3: always set the metadata proxy in nova.conf * NSX|V3: Add relatedErrors in the details of ManagerError exception * NSX|v IPAM support for external & provider networks * NSX|V3: ensure that the NSX credenatials are set for devstack * NSX|V: enable port group to belong to a spoofguard policy * Fix more backup edges at the backend * NSXV3-devstack: added parameter -1 to curl command * Use model\_base from neutron\_lib * NSX|V router flavor support * NSX|V3: Fix connected routes not advertised * Fix provider sg delete by non admin and non admin rule change * Fix failing unit tests from neutron changes * NSX|V: return default AZ if name AZ not found * NSX|V3: be more robust under port name update * NSX|V3: ensure that variables are assigned * Admin util: remove deprecation warning * [Admin-Utils] NSX-V3 upgrade vm ports after migration * [NSX|v3]: L2gateway fails to start when Bridge cluster configured * NSXT: Adding a script to set global firewall config autodraft option * NSXv3: Fix tap-flow-create to use floating IP of destination port * Updated from global requirements * NSX|V: remove invalid parameter from context * NSX|V3: do not disable native DHCP when subnet is not empty * Fix fetching dvportgroup name * NSXv: remove LBaaSv1 code * NSX|V3: Update upgrade scripts for native DHCP/Metadata * NSX|V3: Remove unnecessary debug message * Remove lbaas migrate utility * NSX|V3: Enhance add router interface failure handle * NSXv: recover from bad VDR DHCP bind 
case * NSX|V delete metadata ports upon deleting the dhcp edge * Tempest suite file for various versions of VIO testing * Pull out dhcp and metadata tests to their own file * NSX|V3: Add codes to handle corner cases in native DHCP support * Fix tempest.conf generation * NSX|v Fix router type update to not update all the attributes * Update tox.ini for upper constraints * Tempest: Add native DHCP methods in NSXv3 client * nsxv3: refactor test\_plugin TestNsxV3Utils * remove some db method access from nsxlib code * NSX|V: do a retry for interface update * NSX|V: do not rename edge if not necessary * NSX|V3: remove backend intercation from DB transaction * Change the lock trace message * NSXv: LBaaSv2 shared pools * api\_reply: migrate routers static routes * api\_reply support for QoS migration * api\_reply: NSX-v support + activate tests * [dvs] Validate network name for portgroup networks * Expose advertise\_static\_routes prop from router api * Updated from global requirements * Remove work-around for failed l3 test * NSX|V: retry on failed virtual wire create * NSX-V service insertion fix callback registry * NSX|V: ensure that metadata works with 'internal' ipam * Fix broken unit tests * nxv3: mass refactor of nsxlib * NSX|V3: validate if 'destination' exists * NSX|V3: only run cleanup for devstack if q-svc is enabled * Make it possible for DvsManager to manage its own DVS * Change native DHCP/MDProxy log messages from info to debug level * Remove unused members arg frm add\_router\_link\_port * Fix failing L3 test * Tempest: Removed bug decorators, renamed obsolete def * Remove deprecation warning - security group db * NSXv3Admin: Fix mismatches for security-groups * Tempest: Add dvs specific test cases * Tempest: Providing support for dhcp-121 feature * Enable DeprecationWarning in test environments * call correct stop method * NSXv - Support provider security-groups * QoS integration - callbacks should support a list of policies * [NSX|V]: Fix 
add\_router\_interface for shared router driver * NSXv: eliminate task use from edge deletion * Updated from global requirements * skip failing l3 tests for now * Tempest: Network tags clients, CRUD and Filter testing * [dvs] set physical network as dvportgroup moid * NSXv3: Raise the proper exception if nsgroup doesn't exists * Admin util: use correct config options * Use neutron-lib add\_validator for registration * Rename tenant to project changes * NSX|V3: update the nova API with the metadataproxy secret * Updated from global requirements * Tempest: Update nsxv3 scenario suite * Updated from global requirements * NSXv: duplicate code * NSX|V3: ensure that the manager, user and password are set * NSXv: eliminate task use from edge update * NSXv: eliminate task use from NAT update * NSX|V refactor create\_dhcp\_edge\_service to avoid code duplication * NSXv: eliminate task use from update routes * nsxv3: provider security groups * Provider Security groups * NSXv: eliminate task from edge rename operation * NSXv: eliminate task use from edge creation * Update models to use the base model for tenant details * DVS: provide a dhcp\_driver class for plugin * NSX|V3: Delete backend DHCP servers during devstack cleanup * NSX|V3: Make metadata route configurable for native metadata service * NSX|v Metadata proxy handling edge problems * Fix xenial pep8 problems - add translations * NSX|V remove async calls to backend * NSX|V add dhcp-mtu extension to subnet * NSX|V3: Enable service\_metadata\_proxy for native metadata service * [NSXv3]: Add support for L3SPAN * NSXv: use synchronous call for firewall update * Updated from global requirements * NSX|V3: Fix delete\_network issue with native DHCP * NSX|V3: fix issue with OVS manager configuration * NSXv - dispose unused code * NSX|V: don't throw exception when same vnic is configured * Update CIF creation request body * NSXv - eliminate task use from delete\_interface * NSXv - log the call stack while locking * DVS Plugin: 
Add Support for updating a network * Tempest: Providing support for disable spoofguard feature * NSX|V Distributed router PLR creation with availability zones * NSX|V3: configure ovs manager according to NSX version * NSX|V Use configured appliance size for PLR routers * NSX|V3: configure devstack for native DHCP and metadata support * Override default value of Q\_USE\_PROVIDERNET\_FOR\_PUBLIC * Remove discover from test-requirements * Fix broken unit tests * NSX|V: remove validations for AZ's * Service Insertion remove networking-sfc import * NSX|V3: make use of agent socket * NSX|V+V3: Fix QoS peak bandwidth calculation * Make Unittests pass * NSX|V add edge\_ha per availability zone * NSXv: check bindings validity in route methods * NSX|v Add default availability zone to l2 gateway router creation * [Admin-Util NSX|V] availability zones support * NSXv - metadata status in admin utility * [Admin-Util NSX|V] add more information to backend edges list * [Admin-Util] recreate NSX|v router edge * NSXv- Exit while updating inteface on invalid edge * Updated from global requirements * Updated from global requirements * NSXv - LBaaSv1 to LBaaSv2 migration * NSX|V3: Add user-friendly names for backend DHCP/MDPROXY entities * Fix perodic failing unit tests due to sorting * QoS unit tests - fix deprecation warning * Move the get\_router ops out from subtransaction * Updated from global requirements * NSXv - validate that router binding was found * Raise exception when put CIDR into address pairs * NSX|V Extend Availability Zones to support data stores * NSX|v service insertion handle upgrade * NSX-V Service insertion support * Integrate QoS constants change * Update gate logic * [Admin-Util] list/delete orphaned backend edges in NSX-v * NSX|V unit tests - return edges names in fake vcns get\_edges * [Admin-Util] list missing backend networks for NSXv * [Admin-utils] NSXv recreate DHCP edge * Fix periodic falling test * LBaaSv2 foreign keys * NSX|V: add in edge resource 
configuration support * NSXv: Make router exception more informative * Remove white space between print and () * NSXv - add timestamps to NSXv driver tables * Update Admin-Util RST file with missing/incomplete apis * Updated from global requirements * Add Python 3.5 venv and classifier * Fix README file for better readability * NSX|V3: minor fixes for native DHCP support * [NSXv3]: Tap-as-a-Service NSXv3 driver * Updated from global requirements * NSXv: do not fail on spoofgaurd policy error * The policy file did not take effect in devstack env * Fixed typo in policy rules * Replace raw\_input with input to make PY3 compatible * python3: make unit tests pass * NSX|V - fix exclude list error handling * NSXv: Fix failure in lbaas edge selection * [Admin-Util] fix plugin object in nsxv dhcp-binding util * Admin utility RST file * Show statistics after running coverage * [Admin-Util] add missing edges to nsxv backup-edges list-mismatches * Admin utility: provide possible teaming values * Remove tenant\_id from parent model * [Admin-Util] Fix bad input handling * Admin utility: define choices and resoucres per plugin type * NSX|V: only update firewall if router binding exists * NSX|V: fix conflicting IP's when more than one subnet defined * NSX|V remove vnic from spoofguard only if port-security is enabled * NSX|V: don't fail port deletion if static binding deletion fails * Fix model migration sync tests * Tempest: Support lbaas api & scenario tests to run against upstream * Add testresources to test-requirements * Add in missing test-requirement * Updated from global requirements * Fail silently when deleting security-group rule * NSX|V: ensure route update is aromic for shared routers * NSX|V: don't fail when router binding does not exist * NSX-V: Re-raise exception if failed to assign security-group to port * NSX|V: fix edge case with admin utility and spoofguard * DVS plugin - fix type * NSX|V: retry for network deletion * NSX|V: don't log eception when edge is not 
active * NSXv3: Support CH nsgroup membership using dynamic criteria tags * [Admin-Util NSX|V] update the data stores of an existing edge * NSX|V: add in missing lock for updating nat rules on edge * NSX|V: address DB lock wait timeouts in the plugin * Unit test for nsx|v + nsx|t admin utils * [Admin-Util] cleanup to avoid crashing in extreme cases * Tempest: Fix upstream patch 32049 which replacing oo-wrap * Use AFTER\_INIT in lieu of AFTER\_CREATE * NSX|V3 utility to identify CrossHairs version * [dvs] support 'portgroup' provider type * NSX|V tests - add missing fake vcns api & fix existing * NSX|v3 fix MAC learning exception format * Skip QoS update while creating network during init * Ensure that \_ does not override translation function * Rename URL components for native DHCP support * Prep for pbr warnerrors * Updated from global requirements * NSX|V3: import conditional mock * NSX|V3: Add support for native metadata proxy service * NSX-V support updating port-security of a port * NSX|V: prevent exception with router deletion * NSX-v QoS - fix refactor integration to use the correct api * NSX|V: fix broken unit tests * Updated from global requirements * NSX|V3: mac learning support * NSXAdmin: Update metadata shared secret * NSX|v HA: deploy edges on 2 datastores * api\_replay: remove unneeded entry point to file * [Admin-Util] add neutron config init to the admin utils * NSX|V3 fix get\_ports when retrieving only specific fields * Revert "Temporarily disable tempest plugin" * Tempest: fixed upstream remove network\_resources from sceanrio * Tempest: Use client result for Micro-Segmentation * Tempest: Use network client result instead of OO * NSX|V3: Add support for native DHCP service * Make exclusive router size updatable * NSX|v unit tests: fix fake\_vcns get\_section\_id * NSX|V add vm to exclude list when the port has no port security * NSX|V: validate GW information is correct when attaching router * Temporarily disable tempest plugin * Can't set 
gateway on no subnet network * Fix add same network on different type routers failed * Ensure that ListOpt configuration variables are set * [Admin-Util][NSX-v3]: fix ports mismatch admin util * Remove POT file * Make NSX plugins independent of devstack:lib/neutron-legacy * NSX|V3: start\_periodic\_dhcp\_agent\_status\_check( is deprecated * NSX|V: fix unit test failures * Fix broken unit tests * Tempest: NSX-v external network supports multiple subnets * Tempest: Providing support for dhcp-121 feature * Rename edge appliance before its deletion * Tempest: QoS clients and API CRUD operation tests * NSX|V: use correct lock for dhcp vdr binding * LBaaSv1: Delete LB objects when backend is broken * LBaaSv2: Delete LB even when backend is broken * NSX-V: support qos policy in network get * [Admin-Util][NSX-v3]: list routers which are missing from backend * Fix broken unit tests * Updated from global requirements * Updated from global requirements * [Admin-Util][NSX-v3]: validate ports switch profiles on backend * NSX|V: use correct logging type * Updated from global requirements * NSX-v3: Initial framework for api-replay-mode * Add neutron-api-reply cli tool * NSX|V handle duplicate hostname binding error * Upstream broke Qos unit tests * Update name of backend ports when router name is changed * NSX-V3: support qos policy in port/network get * [Admin-Util][NSX-v3]: list ports which are missing from backend * [Admin-Util][NSX-v3]: list networks which are missing from backend * Updated from global requirements * QoS refactor required changes * Ensure migrate script pass on newer MySQL * Tempest: Change external network to public network * NSX|V3: ensure that a VLAN network cannot be added to a router * NSXAdmin: update member IPs in metadata LB * NSX|V network creation with availability zones hints * NSX|V router create with availability zones hints * NSX|V: only update NSX if neceesary for router update * remove expire\_all in getting backup edges * NSXvAdmin: Fix 
mismatches for security-groups * Updated from global requirements * Fix selecting same backup edge from pool * Tempest: Change tenant to project * Add sample local.conf for nsxv3 * Tempest: Add micro-segmentation scenario test * Tempest: Format vmware\_nsx\_tempest README * NSX|V rename distributed router plr edge when router is renamed * Add README for the NSX QoS service * Enhance getting valid router bindings * NSX|V fix Tasks logging to not crash on non-ascii characters * NSX|V3: add in a method for getting the NSX version * Updated from global requirements * Fix distributed router rename error * Remove ref to oslo log verbose * NSX|V: be proactive with DHCP binding conflicts * NSX|v remove unused dhcp functions from edge\_utils * Tempest: lbaasv2 scenario http round-robin operation test * NSXv3: Clean up logical port for any nsgroup association error * NSX|V3 QoS DSCP marking support * NSX|V: ensure locking when detacing router interface * [NSX|V|V3]: Refactor NSX-V L2 Gateway driver * Tempest: Add nsxv3 api and scenario test suites * Remove deprecated warnings for neutron\_lib * tempest-api-network updates * NSX|V3: ensure no dangling networks * NSX|V rename backend edge name when router is being renamed * Cleanup script: fix typo * NSX|V3 QoS: handle illegal max bandwidth * fix deleting network error with multiple subnets * Multiple external subnets support * Reorder exclusive router delete process * [NSXv3]: Refactor v3 L2 Gateway driver * Updated from global requirements * NSX|V don't crash unattached router port update IPs * NSX|V fix deadlock while updating router gateway * NSX|V: fix broken unit tests * NSX|V Set router status to error if edge\_update fails * NSX|V3: ensure that non Overlay network cannot be added to a router * NSX|V: validate result before return IP * NSX|v fail adding external subnet/port as a router interface * Backup edge put enhance * Fix tempest breakage * NSX|v routers: remove redundant calls to backed fro static routes * Updated 
from global requirements * [Trivial] Remove unnecessary executable privilege * Fix creating portgroup provider net with no physical\_network set * NSXv: Use locking when updating a port * Tempest: Fix py3 indexing issue on dict\_keys * NSX|V: save backend calls when creating DHCP binding * Router intf/gw error enhance * NSX|V3 Delete unused QoS plugin * NSX|v QoS DSCP marking support * FIP firewall rule is missing * Switch to using hacking checks from neutron-lib * [Tempest]: dns-search-domain scenario/negative tests * Tempest: Skip security group tests because of bug * NSX|V Use requests module for HTTP/HTTPS * NSX|V3 add QoS support for networks * [Admin-util][NSXv3] Fix help message for secgroups * [Admin-Util]: Add error handling to nsxv update\_dhcp\_edge\_binding * NSX|V3 add qos support for ports * [Admin-Util] Add error handling to nsxv update\_switch admin utility * Set new default password that vdnet is using * Tempest: Added L2GW API tests * Updated from global requirements * ADMIN: fix confusing error message for spoofguard * NSXv: mock spawn\_n when runnign unittests * Fix a typo in nsx\_v3 update\_resource mocked call * Admin utility: ensure that the router is defined * NSX|v3 replace configuration uuids with names * Updated from global requirements * NSXAdmin-v3: Don't delete internal fw sections and groups * NSXv: Remove redundant code to check for duplicate rules * Revert "NSX: remove usage of ovs\_use\_veth" * Change default backup edge size to compact * Clean edge vnic bindings for a clean backup edge * Tempest: Change tenant prefix to project * [Tempest]: initial lbaasv2 clients and API tests * Enhance dhcp service error handle * Enhance update edge error handle * Automatically generate vmware-nsx configuration files * nsx\_v3: Allow security group rule id to be specified * [Admin Utils] Added missing return to get security group * NSX: remove usage of ovs\_use\_veth * NSX|V log warning when getting a router-binding entry with bad status * 
NSX|V3: fix test imports * Add nsxv3 tempest test suite * NSX\_V3: treat logical port mapping integrity issues * Fix dhcp lock error when update same edge * NSX|v limit access to metadata service to specific protocols * NSX|V prevent adding static routes to shared routers * [Tempest: use project instead of tenant * [L2-gateway]: Fix l2gw plugin due to networking-l2gw changes * [NSX-v3]: Fix L2GW connection-create * Updated from global requirements * Remove vmware-nsx's static example configuration file * NSXv: Enhance edge deploy failure handle * NSX|V add qos support for networks * [Admin-Util]: List networks associated with missing edges * LBaaSv2: Delete fails while LB in ERROR state * [Admin-Util]: Fix tabulate results method * NSX|V3: separate the neutron network id from nsx network id * LBaaSv2: Fail when no router found for subnet * Change async static route call to sync * Keeping the load balancer firewall on edge * [Admin-Util]: Add support to list missing edges for NSXv plugin * NSX|v update edge device when the user changes the port ip address * NSX: do not block init with security group logging configuration * NSX|V3: enable plugin to use existing ID's * fix failing pep8 job * subnet host route support * Add dhcp metadata host-route support * [NSX-v]: Add support for multiple DVS for VLAN network type * Remove attribute not specified before checking duplicate sg rules * [NSX-v]: Validate edges while subnet create * NSXv3: Adding support for 'secgroup-rule-local-ip-prefix' extension * NsxV3: Fine grained logging for security-groups * NSXv: Fine grained control for logging security-group rules * Checking load balancer before removing router interface * NSX|MH: remove tests that break the gate * Add the metadata lb back * Enhance update routes at the backend * Fix network attached to two distributed routers * NSX|V3: remove redundant warning * NSX|V3: Optimize finding metadata port * Optimize get\_networks function in all plugins * Tempest: Adding l2gw 
test * Remove useless edge cluster uuid * Add option to expose dvs features on nsxv plugin * NSX|V3: pass 'nsx-logical-switch-id' as part of the port details * Tag the alembic migration revisions for Mitaka * clean tier0 ports created from nsxv3 * Add debug message for REST call reply * Tempest: Use routers\_client in router test * Fix security-group bulk rule creation * Tempest: Add external\_network\_cidr in config * NSX|V: upstream broke us * Tempest: Use data\_utils from tempest.lib * Admin util: add in option to set the DVS teaming policy * Fix deploying edge concurrently failure * Tempest: fix broken tempest tests * Translations: ensure that the locale directory is created * NSX|V3: Change default value of metadata\_on\_demand to False * Tempest: Add multi-hypervisor scenario test * Used warning instead of warn * NSXv3: Avoid AttributeError in create\_security\_group exception handling * remove unneeded param from \_create\_port\_at\_the\_backend * NSXv3: Update existing default firewall section on init * NSX|V: increase default retries * Fix cfg\_group not found in dvs plugin * Add missing translation to LBaaSv2 listener * NSXv: Better exception handling when failing to create a secgroup rule * Register extending function for security-group rule extension * urlparse is incompatible for python 3 * NSX\_V3: ensure that DHCP works with multiple subnets * Add extension fields on network and port create * NSX|MH: rename qos extension to qos\_queue * Extending security-group ingress rule * Updated from global requirements * Don't rely on unique names to initialize default backend resources * Tempest: change tempest\_lib to tempest.lib * Insert new security-group FW sections at the bottom and not at the top * Allow use of port 22 for LBaaS VIP * Translate LBaaS TERMINATED\_HTTPS to NSX equivalent * NSX|v3 fail create\_port for external network with device owner compute * NSX|v3 update\_port on backend only if it was created on backend * NSX|V: improve get\_version 
method * NSX|V3: Add support for vxlan in provider:network\_type * Change length of cert\_id field to 128 * NSX|V3: fix broken unit tests * NSX|V3: use oslo\_serialization instaed of json import * NSX Admin: Add support for NSXv Security groups * NSX: make use of neutron\_lib constants * Fix router intf port deleted when are in use * Updated from global requirements * [Tempest]: fix upstream remove commands module * NSXv: fix broken unit tests * Skip test\_create\_security\_group\_rule\_icmpv6\_legacy\_protocol\_name * NSX|V: fix broken unit tests * NSX: Enable resource tracking in NSX plugins * nsx-v3: Configure interface and route to external network * Tempest: Add placeholder for common tempest tests * Add internal metadata network on demand * nsx\_v3: Move db access in update\_port under same update transaction * Apply routes on VDR's PLR when no-snat is enabled * Updated from global requirements * Revert "Move router db call ahead of backend call" * Move remove\_router\_interface\_info db call ahead of backend call * [Tempest]: NSX-v dhcp service is not reachable * Fix vdr interface deletion sometime failed error * Revert "NSX-T separate the neutron network id from nsx network id" * NSX|V and NSX|V3: add in support for 'subnet\_allocation' extension * NSX|V fix broken unit tests * NSX: make use of neutron\_lib exceptions * Updated from global requirements * [NSX-v]: Use oslo\_serialization for jsonutils * Don't assume backend resource was created if no ManagerError raised * nix.ini: fix typos * Reorganize locking for NSXv * NSX-T separate the neutron network id from nsx network id * Multiple Transport Zones Scenario Tests * Initial release of DNS search domain API test * NSXv: Edge firewall default timeout should be 7200 * Admin util: add support to get network morefs * NSXv3: Add missing config option details to nsx.ini * NSX-v3 reinitialize cluster on fork * NSX-v3 disable psec per port * NSXv: Add DNAT rules for the interfaces of the shared routers * Add 
force=true option to NSgroups DELETE call * Differentiate between StaleRevision error to other manager errors * NSXv: Place LB only on tenant's router * Fix log exception * Remove deprecated warnings * NSX|V3: Remove Neutron port if failed to add port to security-groups * NSXv - allow changing the router type exclusive <-> shared. APIImpact * NSX|V3: Remove neutron port if failed to create backend port * Admin util: fix spoofguard issues * NSX|V: ensure that gateway network has a subnet * nsx-v3: remove old FIXME comment * NSX v3 devstack cleanup invalid call * Separate NSX backend transactions out of neutron DB transaction * NSX|V3: Fix floating IP status * Resolve NetworkInUse race condition * Additional debug for NSX v3 cluster * NSXv3: Retry to remove NSGroup member for any ManagerError * Updated from global requirements * Do not exclude flake8 checker for devstack directory * Multiple Transport Zone API tests * Updated from global requirements * NSX|V3: Update router name on NSX * Avoid UnboundLocalError: local variable 'lport' on external networks * NSX|V: ensure that DHCP config is updated synchornously * NSXv admin util - cleanup edge vnic bindings * Add nsxv3 delete router test * Consolidate branch setup for dependencies pulled from git * NSX-v3 multi-manager round robin scheduling * NSX-v3 update endpoint state only on timeout * Updated from global requirements * NSX|V: add ability for admin to configure default nameservers * Revert "[NSXv]: Push description to NSXv edge during creation" * NSXv: raise exception for reserved subnets * Unblock the gate * Change imports for IP\_PROTOCOL\_MAP * NSX-v3 sensible HTTP connection defaults * [NSXv]: Fix multiple tz subnet create * Add nsxv3 security group test * Add bandit security linter * NSX-v3 HTTP retries conf property * [NSXv]: Add support for dns search domains in NSXv plugin * NSX-v3 proxy exception handling * Add nsxv3 floating ip temepst test * Add method to get firewall rule in nsxv3 client * [NSXv]: 
Push description to NSXv edge during creation * Add nsxv3 router tempest test * Better error message when reaching security-group maximum capacity * NSX-v3 http read timeout * Follow the convention * Files contains test lists for regression test execution * Run selected api network tests * Negative tests for Multiple Transport Zones * Add nsxv3 client for vmware\_nsx\_tempest * Update translation setup * Add nsx networks test * Add NSXv3 config for tempest * [AU]: Add command to modify edge appliance size * NSX|V: fix broken unit tests * Instruct tox to ignore import exeption on vmware\_nsx\_tempest.\_i18n * Add retry logic when deleting logical port and logical switch * Updated from global requirements * Remove deprectaed warnings * NSX|V: ensure that DHCP bindings are deleted * Updated from global requirements * Always set advertise\_nat\_route\_flag True for FIP * move devstackgaterc file to devstack/nsx\_v3 * Fix LBaaSv2 logging * Add external DNS driver mixin to VMware plugins * Enable availability zone for network * Fix broken unit tests * Add placeholder for NSXv3 tempest tests * init release of vmware-nsx tempest tests in tempest external plugin * Updated from global requirements * Updated from global requirements * Do not specify vnic index while setting DGW * Fix unit tests failures * Address pair validation for NSX v3 plugin * Define has\_neutron\_plugin\_security\_group using override-defaults * NSX|V3: ensure that port update allows traffic * NSX|V3: fix the router tags for uuid * Fix OS Default FW section apply\_tos * LBaaS Layer4 TCP VIP should use LVS * Rename badly named edge appliance * Truncate edge appliance name when too long * Updated from global requirements * Fix plugin(s) following upstream changes * Use a retry when adding or removing NSGroup members * NSX|V3: ensure that a resource is used when creating a short name * NSX|V3: fix network name update to include UUID * Updated from global requirements * NSX|V3: add tags for the T0 and T1 
ports * nsx\_v3: delete security group rule from backend first * NSX|V3: add tag for instance id if possible * NSX|V3: provide better names for ports on the backend * Updated from global requirements * Explicitly call \_ensure\_default\_security\_group before creating a new SG * Adopt incremental add/remove member API for NSGroup * Insert FW rules at the bottom instead at the top * NSX\V3: use 'OS' as the prefix for the nested groups * Delete i18n\_cfg file * LOG.warn -> LOG.warning * NSX|V3: ensure that tag length does not exceed 40 characters * [NSX-v]: Update existing vague exceptions * [NSX-v]: Introduce a more user friendly exception * [NSX-v]: Validate DNS search domain values at API level * Making the number of nested NSGroup configurable * Delete all NS-Groups when call unstack.sh * Locking when initializing the NSGroupManager and the default FW section * NSX|V: dvs\_id is optional and not mandatory * NSX|V3: rename cleanup script for devstack * Ignore NS-Groups that have no "tags" * Admin util: verify that a backup edge is a backup * NSX|V3: remove double import * NSX|V3: Rename logical port with router attachment * NSX|V3: Rename tag in logical port mapped to neutron DHCP port * NSX|V3: Rename tag in logical port with router attachment * NSX|V3: Add tags for DownLink logical router port * NSX|V3: fix short-name notation * NSX|v3: Scaling security-groups by using multiple nested groups * Fix attach specific router port failure * Removing manually edge members placment codes * Updated from global requirements * Update pool erases member config * Fix parameter list for create\_dhcp\_bindings() * [NSXv3] Add tags to qos switching profile * Fix potential infinite loop during add\_router\_interface * [NSXv3] Add os-project-name tag * NSX|V3: add in tag resource * NSX|V3: Rename logical router * NSX|V3: Rename logical router port * Move metadata proxy processing out of DB transaction * Move python\_nsxadmin out of a top-level namespace * nsxv3 multi manager:fix 
backend cleaup * NSX|V3: ensure that router id is updated prior to tag creation * NSX|V3: provide a unique name for the network on the backend * Set logical switch os\_neutron\_id tag * NSX|V3: ensure that the DHCP switch profile is not too permissive * NSX|V3: fix tags for internal resoucres * [NSXv3] Clarify error regarding missing tier0 UUID * NSXv3: fix edge HA doesn't work * Add metadata proxy support in NSX/T plugin * Fix delete\_port case in handle\_port\_metadata\_access * Fix multiple subnets attached to router * Updated from global requirements * Clarify usage of NSXv3 default UUID settings * NSX|V3: add in missing tests for portbindings * Fix attach second subnet on router failed * NSX|V3: add in missing support for host\_id update * Updated from global requirements * Deprecated tox -downloadcache option removed * Updated from global requirements * Rename os-tid tag to os-project-id * Admin Utility: Add orphaned-edges resource * Admin Utility: Minor fixes for output formatting * Admin Utility: Update DHCP binding for NSXv edge * NSX|V3: improve configuration names * Make sure correct branch of neutron is pulled in * Admin Utility: Add command for delete-backup-edge * Rename neutron-id tag to os-neutron-id * NSX v3 multi-manager * Skip updating logical port when removing router interface * Remove code that is no longer used * Updated from global requirements * Bugfix: Add translation hints * Stop creation of port at the backend in case of failures * Use the correct \_ from vmware\_nsx.\_i18n file * Updated from global requirements * nsx\_v3: remove unneeded call to nsx\_db.get\_nsx\_switch\_and\_port\_id * Fix custom conf files referencing * [Admin Utility]: Add command for list-backup-edges * Install the tools folder on vmware\_nsx install * [Admin utility nsxv3] Fix import error * Updated from global requirements * Add reno for release notes management * Admin Utility: Fix output for missing edges and spoofguard policy * NSX|V#: add in missing log hint 
* Add tag for os default dfw section * Fix spacing for help string * Admin util should work from any dir * Add enum34 & PrettyTable dependencies[admin util] * Cleanup python-nsxadmin dir * Use prettytable and rename python-nsxadmin - to \_ * [AU]Fix help message for supported ops on resource * Switch to internal \_i18n pattern, as per oslo\_i18n guidelines * NSX|MH: unblock gate * Added cleanup for switching profile in unstack * NSX|V: remove exceptions when running unit tests * NSX|MH: fix broken unit test * NSX|MH: unblock the gate * Add nsx-update to supported ops * [NSXv] Add SSL support for metadata service in NSX-V plugin * Fix stale dhcp bindings left after VM stress test * Fix translation file names * Remove deprecated parameters * Setup for translation * Add execution time to backend transaction logging * Fixes typos * NSX|V: add locking for interface management * NSXv: Change edge\_ha flag on edge updates * Check if l2gw has been used before creating l2gw-connection * [NSXv]: Add support for multiple transport zones * Use config choices for replication\_mode option * Fix network not detached from vdr dhcp edge * Updated from global requirements * [Admin Utility]: Add command to enable NSXv Edge HA * Fix typos with topy * Random cleanups * Updated from global requirements * Use choices for exclusive\_router\_appliance\_size * Adding unittests for security-group implementation * Updated from global requirements * NSXv3: Exclude tempest tests expected to fail * Fix unittests for port-security and security-groups * NSX|V: fix broken using tests * Remove an invalid parameter in logical-router-port REST request * Fix attach logical router port failure * Add NSX\_L2GW\_DRIVER for NSX-v plugin * NSX|V3: treat stale exception on port update * Updated from global requirements * Explicitly remove nsgroup form the nsgroup container on secgroup delete * Revert "Security Group integration scale up" * NSX|V3: ensure that rate limits are disable for the DHCP profile * 
Support L2 gateway edge HA according to the edge\_ha setting * Enable global advertisement status flag * Log returned data from NSX manager * NSX|V: ensure that spoofguard policy is port update * Fix Edge appliance rename failure * Fix up broken unit tests * NSXv: Check router-type before router-size * Use PortOpt instead of min/max on IntOpt * Security Group integration scale up * NSXv3: Fix typo in URI while setting GW for router * NSXv: Add method to get dhcp bindings count per edge * Fix indentation * Admin utility: List missing DHCP bindings on NSXv * Admin Utility: List spoofguard policy mappings * Admin Utility: Delete orphaned NSXv edges * Fix show 'security-groups' field for port with no security-groups * NSX|T: update nova configuration with ovs\_bridge * Ensure that method's default argument shouldn't be mutable * NSXv: add address pair support * Admin utility: list orphaned NSXv edges * Adding a required field to when requesting to add an ns-group member * Remove session flush in nsx v3 plugin * LBaaSv2 driver * Add support for adding update callbacks * Framework for debugging nsx-openstack setup * Updated from global requirements * Rename "routes" to "results" for consistency * Rename 'rule\_id' to 'id' for consistency * NSX v3 API client error propagation * Adding firewall default rule to allow outgoing dhcp traffic * [NSXv]: Adds method to get all spoofguard policy mappings * [NSXv]: Add get and list methods for spoofguard policy * Cleanup utility for nsxt plugin * Updated from global requirements * Move 'locking\_coordinator\_url' to common configuration section * psec profile distributed locking * Updated from global requirements * Add networking-l2gw to tox\_install.sh * NSX-v3: Fix security-group update missing members attribute * nsx v3 router refactor * Attach psec profile only with IP addr * Updated from global requirements * nsx v3 ut cleanup * NSXv3: Fix rpc code * Fix DHCP agent updates * Fix missing nsx v3 layer3 api changes * [NSXv3]: Fix 
update\_port call * Fix missing function in nsxlib v3 * NSXv: Adding Subnetpools unittests * Fix DHCP firewall rule * Set external ID for nsxvswitch bridge to "nsx-managed" * nsx v3 port security support * Updated from global requirements * nsx v3 lport updates * Explicitly add 'members' field when creating ns-group * Updated from global requirements * Fix dhcp\_router\_id DB integration error * refactor NSX v3 UTs * [NSXv3]: Enable tempest tests for security-groups * NSXv3: static route support * Fix loadbalancer driver call NeutronManager init on every call * [NSXv]: Add conf param for exclusive router edge size * Revert "Fix subnet use vdr dhcp edge for dhcp service" * Enable HA on the edge which works as L2 gateway * Reorganize vmware\_nsx/tests structure * NSX|V: enable update for subnet from horizon * NSX|V: set the edge\_ha default state to be False * Add router-size when creating an exclusive router * Update coverage file to exclude cisco files * nsx v3 lport refactor * nsx v3 port security plumbing * Change ignore-errors to ignore\_errors * NSXv3: Completing security-group implementation * dfw\_api.py: fix nsxclient typo * Updated from global requirements * NSXv: enforce backend limitations with IPv6 * NSXv: set the 'aggregatePublishing' on the manager * Divide vmware\_nsx/nsxlib/ into mh and v3 subdirectories * [NSXv]: Fix router attribute validation check * NSXv driver for Layer 2 gateway * Updated from global requirements * NSX\_V3: do not configure ovs manager * Divide vmware\_nsx/plugins into plugin-specific subdirectories * Ensure that DHCP agent is configured with correct bridge name * L2 gateway migration skip * Metadata LB configuration should be synchronous * Updated NSXv plugin parameter descriptions * NSXv3: FIP support * Divide vmware\_nsx/services into plugin-specific subdirectories * Add sample localrc for nsx\_v3 * NSXv: ensure that locking is done with flag 'external=True' * Move vmware\_nsx/neutron/plugins/vmware to vmware\_nsx * Move 
vmware\_nsx/neutron/tests to vmware\_nsx/tests * Move vmware\_nsx/neutron/services to vmware\_nsx/services * Move vmware\_nsx/neutron/db to vmware\_nsx/db * Replace missing tag with "" instead of None * NSXv3: router intf support * NSXv3: Router GW support * NSXv3: Add test coverage for build\_v3\_tags\_payload * NSXv3: Add neutron-id tag to backend * Update references for vmware plugin config (etc part) * NsxV3: Router preparation for GW/intf/FIP support * Remove version attribute from setup.cfg * Add NSGroup container and default firewall section for security-groups * Security Groups implementation * Move vmware-etc to top directory * NsxV3: external network support * NSXv3: Update backend during network update * Fix README.rst * Bump version to 7.0.0 * Fix duplicate dhcp edge name error * Fix dhcp service edge select/delete conflict * Use synchronous call when updating VDR interfaces * Fix dirty DB entries left on deleting vdr with GW * Fix subnet use vdr dhcp edge for dhcp service * Fix logging message on VIP update * NSXv: ensure that member update are atomic across API workers * Handle VDR connected to subnet while not the DGW * NsxV3: Add test-list for tempest * Updated from global requirements * Fix a typo in comments in nsx.ini * Fix NSX-v test and update logic for v6 subnets * Updated from global requirements * Add in pep8 checks for the tools directory * Fix a typo in comments true -> True * Stop doing any magic cloning of neutron during CI * Add support for dhcp extra opt to nsx v3 plugin * Add Model-migrations-sync test * Fix dhcp bindings missing problems * Fix some routes disappeared on TLR * Nsxv: Fix db out of sync with backend * Fix comment concerning metadata agentless mode * Fix exception handling in update port call * NSX|V3: create dhcp profile at boot time * NSXv: validate that router is not None * Fix fip problems for provider router * LBaaS: allow configuration of SOURCE\_IP LB method * Correct the version checking for rp\_filters * Remove 
router\_type for distributed router * Updated from global requirements * Add support to NSXv3 driver to verify certs * Add unit tests to test dhcp switching profile * NSXv3: Introduce config param to add dhcp switching profile * NSXv3: Fix router interface deletion * Tag the alembic migration revisions for Liberty * Skip test\_create\_router\_gateway\_fails test * Add conf parameter to local.conf to set default l2gw * Fix the launchpad url correct * Update L3 codes due to API changes * Missed l2gw port check in driver * Install vmware-nsx during 'stack install' phase * Move nsx\_l2gw\_driver to DEFAULT section in nsx.ini * NSXv3: Add more unit tests for layer 2 gateway support * Delete security group bindings on port delete * Fix the L2gw migration script * LBaaS: up state update for members in ERROR state * NSXv3: Add backend driver for Layer 2 gateway * Updated from global requirements * Change the first letter of Log sentence to uppercase * Handle concurrency with LBaaS FW section update * NSXv: fix LBaas logging issue * rp\_filter status get not supported in NSXv 6.1.x * NSX-MH: Fix test\_update\_subnet\_gateway\_for\_external\_net UT * NSX: Move DB models as part of core vendor decomposition * Adding Neutron API extensions * NSX: Register migrations at install time * Refactor neutron\_plugin\_configure\_service() * Updated from global requirements * [NSXv3]: Add sample conf variables to nsx.ini * Deploy NSX Edges in HA mode * Cache lb\_plugin in the loadbalancer driver * Use min and max on IntOpt option types * Ensure NSXv driver can verify certificates * DVS: Verify certificate on vCenter connections * Fix test module import due to a rename in neutron * Define VIF\_TYPE\_DVS in vmware-nsx repo * Fix some typos in docstring and error messages * Replace references to VC with vCenter * Updated from global requirements * Fix logging message while updating LBaaS pool * Make edge cluster specification optional * NSXv: do not fail on spoofgaurd policy error * 
Updated from global requirements * Adds CRD operations for NSX Qos Service Plugin * Fix failing unit test due to exception change * NSXv: ensure per process task manager * Remove duplicate NSXv3 option * Add parent/tag integration for docker stuff * NSX-mh: Failover controller connections on socket failures * NSXv3: Support network creation options * Fix nsxlib.v3.client.delete\_resource * Updated from global requirements * NSXv: prevent host route creation * Fix interlock between dhcp conf and dhcp binding * L2 gateway service plugin support NSX-V backends * Initialize alembic branches for vmware-nsx repo * NSX-mh: Remove \_get\_fip\_assoc\_data * NSX-mh: use router\_distributed flag * Mishandled concurrency with metadata on DHCP * Support Nova metadata address change * Fix concurrently update/CD dhcp on same edge * Verify dhcp/net binding exists before using * Mitigate DB inconsistency on dhcp edge * Randomly select available dhcp edges * Logging jobs number per edge * Open firewall for static routes relative flows * Fix Default edge firewall rule * NSX-v3: Add support for port update * Use ICMP to health-check metadata proxies * Add HTTP method, URL path parameters to monitor * Updated from global requirements * LBaaS: Add member status to member statistics * Mark LBaaS pool as ERRORed when no Edge is found * Fix using the None object * Prevent deletion of router port with LBaaS config * Move vmware plugin from neutron to vmware\_nsx repo (etc part) * NSXv: fix broken unit tests * Drop ipv6 addresses before sending to nsx-t backend * Add support for L2 gateway service plugin * Add metadata\_initializer configuration parameter * Adds support to create qos-switching-profile * Log response body on bad requests from nsx * NSX: Rename default\_interface\_name option * Updated from global requirements * Fix plugin url in readme * Fix fetching LBaas pool stats failure * Fix DVR for NSX-mh * Fix LBaaSv1 pool member deadlock * NsxV3: Adding Neutron security-group 
processing and testing * Verify that VDR DHCP binding exists before using * Remove warning on router admin state not supported * Remove check for overlap between the fixed ips and addresspairs * Nsx manager client: return meaningful errors * Initialize a per-process tooz coordinator * Prevent failures on router delete * NSX-mh: perfrom appropriate pre-delete checks * Set default rp\_filter for NSXv 6.2 * NSXv: prevent toggling of ditributed router * Updated from global requirements * Add nsx-manager config option to nsx.ini * Add support for dhcp extra opt * Refactoring out common client api methods * Revert "Fix routes configuration failed at backend" * Updated from global requirements * Don't fail when adding an existing nsx-security-group member * DHCP Edge ping bugfix * stop using namespace packages * NSXv: ensure that update\_interface is synchronous * NSXv: only configure DHCP on backend if subnet is configured * Fix cleanup upon metadata failure * Bugfix: start coordinator on locking init * NSXv: fix bad log message * Fix routes configuration failed at backend * NSXv: fix debug log message * NSXv: fix broken log * NSXv: support for multi api\_workers * Add locking to LBaaSv1 driver, exclusive router * Anothher step towards Python 3 compatibility * Fix broken unit tests * Updated from global requirements * Adding debug logging when vnic fixed-ips are assigned or updated * Updated from global requirements * Fix LBaaSv1 exceptions * MH: Limit enabled versions for agentless mode * NSX-mh: allow names with underscore for l2gw transport * Updated from global requirements * NSXv: enable support for a portgroup provider network * Metadata VDR bugfix * DHCP Edge ping bug * Fix failing unit tests * Put user/password in nsx\_v3 during devstack setup * Adding retry when requesting NSXv to add security-group member * Remove skipped tests due to missing mocks * Updated from global requirements * Disable autoconfiguration of rules on Edge * create\_port in plugin is 
sometimes called within a transaction * NSX3: reuse common code * Fix update subnet from disable-dhcp to enable-dhcp * NSXv: ensure that DHCP bindings are done synchronously * NSX: \_delete\_port was changed to ipam.delete\_port * Fix unit tests for master * Fix DBDuplicateEntry in unit tests * nsxv3: implement address\_bindings * Split out nsx\_user/pass to nsxv3 section * Make use of oslo\_service for loopingcall * Update FW cluster section when adding additional clusters * Support update subnet dhcp status * Bugfix for metadata support with overlapping IPs * Python 3: dict\_keys object does not support indexing * Updated from global requirements * Fix class test name to be V2 not V3 * Updated from global requirements * Fix FIP datapath is broken by disabling enable\_snat attribute * Fix delete API response code * Add unit tests for nsxlib-backend methods * Add neutron\_version tag to NSX entities * NSXv: fix broken unit tests * NSXv: add in extra configuration validations * Recreate metadata route after VDR gateway is set * Add delay to sync thread startup * Add Openstack tenant-id as os-tid to NSX * Fix failures caused by upstream changes * Return 400 status code if l3\_ext network type is misused * Fix typo * Fix test\_router\_add\_interface\_port\_without\_ips testcase * Updated from global requirements * Do not convert RXTX factor to int * Fix routes disappeared by shared routers with different tenants * Updated from global requirements * Updated from global requirements * Sort our neutron requirements * Fixing default security-group ingress rule * Fix delete provider network with dvs backend * Fix update subnet gateway when creating the edge binding * Support reconfiguration of metadata service * Enable neutron unit tests for network and port * Update to the latest requirements * Distributed locking support * Change stackforge to openstack * Update .gitreview file for project rename * Pass admin\_status from CLI to NSX for port-create * Add devstack support 
for nsx-v3-plugin * Set display\_name for port on NSX if specified in Neutron * Skip failing unit tests * NSXv: fix bad log message * Handling IndexError in case there are no vdnscope * NSX-mh: fix default l2 gw connections * Removing port dhcp binding when detaching interface * Fixing port index update with setting index to None * move logical switch deletion after edge delete * Prevent RXTX factor from being updated on network queue * Fix a typo * Python3: replace dict.iteritems with six.iteritems * Change spoofguard publish on vnic level * Support for router admin-state-up * NSXv: fix broken unit tests * NSXv: add in support for dhcp\_lease\_time * NSXv: Update DHCP binding configurations * NSXv: fix log message * NSXv: fix hacking pep8 issues * Allow ping reply from DHCP Edge appliances * Metadata for VDR networks * Fix redundant retry-on-exception when removing vnic from spoofguard * Make Edge appliance credentials configurable * Create backend security-group and cluster dfw section if needed * Removing use of contextlib.nested * Expect a IPv6 unit test failure * Fix a typo in nsxv\_cleanup * Fix uplink direction and don't specify index * NSXv: add in support for update\_subnet * Enable static routes on shared router * Tell NSX to delete all ports when delete lswitch * Fix hard coded credentials * Delete port from NSX backend first * Fix nosnat environment blocked by edge firewall problem * Remove sorted func in nsxv plugin * NSX: fix broken tests for ICMPv6 * DVS: ensure that horizon works with DVS plugin * Add DVS devstack support * DVS: fix issues with spinning up VMs * NSX-MH: do update port on backend outside of db transaction * NSXv: Add fake tenant id to metadata network * Address exception when creating metadata service * Fix shared router is bound on a deleting edge * Fix identity key conflict error in sqlalchemy * VMware: fix broken tests * Backend handling on security group update * More fixes and cleanups * Fix port delete * Implement router delete 
and fix router create * Add port-binding extension and some small bug fixes * Add basic integration with nsx\_v3 * nsx\_v3\_plugin: Initial plugin framework * Fix unit tests import paths * Enable subnet-allocation for the MH plugin * Refactoring of security-groups operation * Fix broken unit tests * Recover unit tests from upstream changes * Devstack plugin: flush addresses on public bridge * LBaaS plugin bugfix * Fixing unittests * Fix after 'breakage' * Statistics support for Neutron LBaaS plugin * VMWare Edge appliance loadbalancer driver * Fix "Lock wait timeout" DB error on router binding * Adopt oslo incubator * Fix tests for routers * Add sync fake deploy edge driver call * DVS: ensure that provider networks work correctly * Use uuidutils from oslo\_utils * Add method to retrieve vdn\_scope\_id * Fix breakages due to upstream changes * Fix DVR for NSX-mh * Fix unit tests * Address group for metadata port should use admin context * Change the admin tenant for metadata handler * DVS: add security group extension * Finish switch to oslo\_log * Fixing spoofguard policy deletion * NSXv: do not create subnet under DB transaction * Add async param to deploy\_edge in fake\_vcns * Enable hacking to be run * Fix overlapping between gw and intf address * Adding unittesting for edge\_utils * Add simple\_dvs\_plugin * Replace "select for update" on nsxv\_router\_bindings * Use oslo\_log for logging * Fix the async between network and edge * Fix multiple workers occupy the last available vnic of shared edge * Fix binding shared router on unavailable edge * Add check-nsx-config to console scripts * Using python retrying module * Edge pool concurrency * Fix deploy redundant backup edges in multiple servers * Default gateway setting for metadata proxy * All the new subnet creation failed after the subnet count crossed 160 * NSXv: ensure that selection of edges is 'atomic' * Address race conditions with edge allocations: * Edge locks * Remove imports from 
neutron.plugins.vmware * Delete disassociated floating ips on external network deletion * Fix import path * Change default routing edge's size from Large to compact * Do not need to update dhcp edge when dhcp is disabled * Fix tenant can't associate fip to its vm by a public router * Don't wait on deleting interface when migrating routing service * Fix many interfaces adding on router problem * static routes support for VDR * Fix router create failed at the backend * GW&FIP&interface support for shared router * NSX-mh: Synchronize dhcp port creation * Do not attempt ext gw port reset for deleted routers * Use DEVICE\_OWNER\_DVR\_INTERFACE in NSX-mh plugin * Exclusive Router Support * Adding devstack hooks for 'unstack' run phase * Implement devstack external plugin * Introducing security group container * fix shared-router/metadata patch merge codes bug * Complete DHCP locking * Lock for security group vnic update * Locking support * Fix slow unit tests * Do not pass muatble object as parameter * Avoid desyncronization between neutron db and dhcp edge * Ensure that NSXv manager support concurrency * Avoid subInterface not found exception * Fix adding router interface job failed * Adding a retry mechanism when approving and publishing ip address * Metadata service concurrent init bugfix * Fix metadata and dhcp using same vnic problems * Add a configuration option to enable the use of spoofguard * Update dhcp edge metadata and vnic first and delete network * metadata\_shared\_secret should support encrypt * VDR Create failed due to metadata issue * Metadata address range change to 169.254.128.0/17 * Handle concurrent execution of metadata init * Adding Port Security support * Metadata init time improvements * Rename subnet metadata\_providers extension * Remove faulty debug logs * Fix wrong method call in nsx\_v.py * Metadata config validation * Metadata shared secret support * Metadata bugfix * Metadata providers bugfix: * Fix update gw\_info with 'enat\_snat' 
failed * NSXv: ensure that 'agent' is registered for extensions * Fix pep8 and py27 jobs * Remove network gateway mixin from nsx-v plugin * Use pretty\_tox for running unit tests * Add db mixins for NSX extensions * Use neutron extensions * vmware unit tests: tenant\_id must be in neutron ctx * Update .gitignore file * adapt UT to have a patch merge in Neutron * Migrate to oslo.concurrency * Rename qexception->nexception * Fix retry logic for UnAuthorizedRequest in race-condition * NSX: synchronize floating IP operations * NSX: Remove logic for creating chained logical switches * Removing neutron configuration file * Rename NsxvSectionMapping class * NSX plugin security group rules summarization * Fix router firewall interface tests * VMware-NSX: update documentation to reference VMware-NSX * VMware-NSX: clean up requirements file * VMware-NSX: skip tests that have ordering problems * VMware: initial NSXv developments * VMware: fix security group check on port create * VMware: fix gitreview 0.0.1 ----- * Create vmware-nsx with history * Updated from global requirements * Add OVS status and fix OVS crash * Cleanup req\_format in test\_api\_v2\_resource * Imported Translations from Transifex * Cisco: unsupported format character in log format * Correct arguments to logging function * Remove locking from network and subnet delete op * Removed unused iso8601 dependency * Add functional test for l3-agent metadata proxy * Remove mlnx plugin * Set timeout for functional job * Enable test\_migration * tests: initialize admin context after super().setUp call * Fixed test test\_update\_port\_security\_off\_address\_pairs * openvswitch/ofagent: Remove OVS.enable\_tunneling option * Imported Translations from Transifex * Remove unused dependencies * Generate testr\_results.html for neutron functional job * L3 Agent restructure - observer hierarchy * Replace non-ovs\_lib calls of run\_vsctl with libary functions * Don't restore stopped mock that is initialized in setUp() * 
Separate wait\_until to standalone function * Imported Translations from Transifex * Mock up time.sleep to avoid unnecessary wait in test\_ovs\_tunnel * Catch duplicate errors scheduling SNAT service * Fix for KeyError: 'gw\_port\_host' on l3\_agent * Migrate to oslo.context * Have L3 agent catch the correct exception * Not nova but neutron * Remove broad exception catch from periodic\_sync\_routers\_task * Fix race condition in ProcessMonitor * Updated from global requirements * Refactor process\_router method in L3 agent * Switch to using subunit-trace from tempest-lib * Move classes out of l3\_agent.py * Prettify tox output for functional tests * Services split, pass 2 * Fix IPv6 RA security group rule for DVR * Imported Translations from Transifex * ofa\_test\_base: Fix NoSuchOptError in UT * Add lbaasv2 extension to Neutron for REST refactor * Remove TODO for H404 * Update rpc\_api docs with example version update * Auto allocate gateway\_ip even for SLAAC subnets * Updated from global requirements * Split services code out of Neutron, pass 1 * Use comments rather than no-op string statements * Enforce log hints * Disallow log hints in LOG.debug * Reduce code duplication in test\_linux\_dhcp * Print version info at start * Enforce log hints in ofagent and oneconvergence * Make sudo check in ip\_lib.IpNetnsCommand.execute optional * Move set\_override('root\_helper', ...) 
to base functional class * Imported Translations from Transifex * Update i18n translation for NEC plugin log msg's * return the dict of port when no sec-group involved * Imported Translations from Transifex * Update i18n translation for IBM plugin log msg's * Workflow documentation is now in infra-manual * tox.ini: Prevent casual addition of bash dependency * Updated from global requirements * Remove RpcCallback class * Convert several uses of RpcCallback * Fix up an old RpcProxy assumption * Remove RpcProxy class * Cleanup recent generalization in post mortem debugger * radvd: pass -m syslog to avoid thread lock for radvd 2.0+ * Get rid of py26 references: OrderedDict, httplib, xml testing * Imported Translations from Transifex * Fix enable\_metadata\_network flag * Fix program name in --version output * Enforce log hints in opencontrail * Update i18n translation for Metaplugin plugin * Update i18n translation for Brocade plugin log msg's * Update i18n translation for Nuage plugin * Update i18n translation for Embrane plugin * Enforce log hints in neutron.plugins.plumgrid * Remove ovs-vsctl call from OVSInterfaceDriver * Update i18n translation for Midonet plugin * Enforce log hints in neutron.plugins.sriovnicagent * Enforce log hints in neutron.plugins.hyperv * Imported Translations from Transifex * Drop RpcProxy usage from DhcpAgentNotifyAPI * Updated the README.rst * Fix base test class for functional api testing * Use oslo function for parsing bool from env var * Don't block on rpc calls in unit tests * Refactor test\_migration * Strip square brackets from IPv6 addresses * Update i18n translation for BigSwitch plugin log msg's * Imported Translations from Transifex * pretty\_tox.sh: Portablity improvement * iptables\_manager: Fix get\_binary\_name for eventlet * test\_dhcp\_agent: Fix no-op tests * Drop old code from SecurityGroupAgentRpcApiMixin * Drop RpcProxy usage from ml2 AgentNotifierApi * Update i18n translation for Mellanox plugin and agent log msg's * 
Drop RpcProxy usage from L3AgentNotifyAPI * Simplify L3 HA unit test structure * Update i18n translation for VMware NSX plugin log msg's * Alter execute\_alembic\_command() to not assume all commands * hacking: Check if correct log markers are used * Fix hostname validation for nameservers * Removed python2.6 rootwrap filters * Imported Translations from Transifex * MeteringPluginRpc: Fix crash in periodic\_task * Enable undefined-loop-variable pylint check * Remove unused variables from get\_devices\_details\_list * Change description of default security group * Fix incorrect exception order in \_execute\_request * Migrate to oslo.i18n * Migrate to oslo.middleware * Remove unused xml constants * Drop RpcProxy usage from MeteringAgentNotifyAPI * Drop RpcProxy usage from l2population code * Drop RpcProxy usage from cisco apic ml2 plugin * Drop RpcProxy usage from oneconvergence plugin * Synced processutils and periodic\_task modules * Migrate to oslo.utils * Fix floating-ips in error state in dvr mode * Reject trailing whitespaces in IP address * Imported Translations from Transifex * CSCO:Tenants not to access unshared n/w profiles * Drop sudo requirement from a unit test * Remove Python 2.6 classifier * Update i18n translation for Cisco plugins and cfg agent log msg's * Remove ryu plugin * Imported Translations from Transifex * Drop RpcProxy usage from nec plugin * Drop RpcProxy usage from mlnx plugin * Drop RpcProxy usage from ibm plugin * Drop RpcProxy usage from hyperv plugin * Drop RpcProxy usage from cisco.l3 * Drop RpcProxy usage from cisco.cfg\_agent * Drop RpcProxy usage from brocade plugin * Update rally-jobs files * Test HA router failover * Imported Translations from Transifex * Update i18n translation for linuxbridge log msg's * Update i18n translation for openvswitch log msg's * Update i18n translation for ML2 plugin log msg's * Updated from global requirements * Imported Translations from Transifex * Enforce log hints in neutron.services * Enforce 
log hints in neutron.services.metering * Fix metadata proxy start problem for v6-v4 network * Fix AttributeError in RPC code for DVR * Drop RpcProxy usage from bigswitch plugin * Drop RpcProxy usage from VPNaaS code * Drop RpcProxy usage from metering\_agent * Fix context.elevated * Tighten up try/except block around rpc call * Implement migration of legacy routers to distributed * run\_tests.sh OS X script fixes * Eliminate unnecessary indirection in L3 agent * Show progress output while running unit tests * Drop RpcProxy usage from LBaaS code * Enforce log hints in neutron.services.loadbalancer * Enforce log hints in neutron.services.firewall * Enforce log hints in neutron.services.l3\_router * enable H401 hacking check * enable H237 check * Updated from global requirements * Check for default sec-group made case insensitive * Update i18n translation for neutron.server/scheduler log msg's * Update i18n translation for neutron.notifiers log msg's * Update i18n translation for neutron.common/debug log msg's * Imported Translations from Transifex * ofagent: Remove obsolete bridge\_mappings (plugin side) * Delete FIP namespace when last VM is deleted * Fix a race condition adding a security group rule * Drop RpcProxy usage from FWaaS code * Drop RpcProxy usage from neutron.agent.rpc.PluginApi * Fix a copy/pasted test mistake * Drop test code copied from nova * Drop several uses of RpcCallback * Add some basic rpc api docs * Drop RpcCallback usage from DhcpRpcCallback * Drop RpcProxy usage from PluginReportStateAPI * Fix hostname regex pattern * Catch NoResultFound in \_get\_policy\_profile\_by\_name * Validate loadbalancing method when updating a pool * Update i18n translation for neutron.api log msg's * Catch DBReferenceError exception during binding a router * Enable default SNAT from networks connected to a router indirectly * Imported Translations from Transifex * BSN: Optimistic locking strategy for consistency * BSN: include missing data in floating IP call * 
ofagent: Remove obsolete bridge\_mappings (agent side) * NSX: Validate gateway device list against DB * Drop RpcProxy usage from MetadataPluginApi * Drop usage of RpcProxy from L3PluginApi * Prevent an iteration through ports on IPv6 slaac * Use a string multiplier instead of 59 repetitions * Convert all incoming protocol numbers to string * Updated from global requirements * Correct raw table regex in test\_security\_groups\_rpc * BSN: Add network to ext\_gw\_info sent to backend * BSN: Set inconsistency record on delete failure * Fix PYTHONHASHSEED bugs in test\_security\_groups\_rpc * Subnet delete for IPv6 SLAAC should not require prior port disassoc * Fix client side versions in dhcp rpc API * Drop usage of RpcProxy from DhcpPluginApi * linuxbridge-agent: make vxlan unicast check more efficent * Moved out common testcases from test\_type\_vxlan.py * Update i18n translation for neutron.extension log msg's * Update i18n translation for neutron.db log msg's * Update i18n translation for neutron.cmd log msg's * Update i18n translation for neutron.agents log msg's * enable F812 check for flake8 * enable F811 check for flake8 * Decrease policy logging verbosity * Support pudb as a different post mortem debugger * Cleanup and refactor methods in unit/test\_security\_groups\_rpc * switch to oslo.serialization * Add rootwrap filters for ofagent * Updated policy module from oslo-incubator * Resolving some spelling mistakes * Fix for FIPs duplicated across hosts for DVR * Drop neutron.common.rpc.MessagingTimeout * Remove neutron.common.rpc.RemoteError * Remove neutron.common.rpc.RPCException * Remove useless return * Cisco VPNaaS and L3 router plugin integration * Fix missing allowed command in openvswitch xenapi agent * fix event\_send for re-assign floating ip * Remove openvswitch core plugin entry point * rootwrap config files reference deleted quantum binaries * Fix L3 HA network creation to allow user to create router * Update default value for agent\_required 
attribute * SRIOV: Fix Wrong Product ID for Intel NIC example * Imported Translations from Transifex * Updated from global requirements * Purge use of "PRED and A or B" poor-mans-ternary * Include call to delete\_subnet from delete\_network at DB level * Use correct base class for unit tests for ML2 drivers * Replace "nova" entries in iptables\_manager with "neutron" * Drop and recreate FK if adding new PK to routerl3bindings * Imported Translations from Transifex * Remove duplicate ensure\_remove\_chain method in iptables\_manager * ML2: fix file permissions * Fix sneaky copypaste typo in ovs agent scheduler test * Make L2 DVR Agent start successfully without an active neutron server * Detect if iproute2 support SR-IOV commands * Use stop() method on MessageHandlingServer * Rename constant to a more appropriate name * Big Switch: Fix SSL version on get\_server\_cert * Check for concurrent port binding deletion before binding the port * Imported Translations from Transifex * Batch ports from security groups RPC handler * Fix incorrect int/tuple comparison during binary search * Big Switch: Send notification after port update * Allow to add router interface to IPv6 SLAAC network * ML2 Cisco Nexus MD - not overwriting existing config * Reorder operations in (l3\_dvr) update floating ip * Use RPC instead of neutron client in metadata agent * Add assertion to test\_page\_reverse method * Adds an option to enable broadcast replies to Dnsmasq * Add advsvc role to neutron policy file * NSX: allow multiple networks with same vlan on different phy\_net * NSX: Fix foreign key constraint delete provider network * Imported Translations from Transifex * Fix 'Length too long' error in neutron-dsvm-functional tests * Remove use\_namespaces from RouterInfo Property * Fix handling of CIDR in allowed address pairs * Updated from global requirements * Remove XML support * enable F402 check for flake8 * enable E713 in pep8 tests * NEC plugin: Allow to apply Packet filter on OFC router 
interface * \_update\_router\_db: don't hold open transactions * Big Switch: Switch to TLSv1 in server manager * Only resync DHCP for a particular network when their is a failure * Validate network config (vlan) * Validate local\_ip for OVS agent is actual ip address * Imported Translations from Transifex * Hyper-V: Remove useless use of "else" clause on for loop * Enable no-name-in-module pylint check * Move disabling of metadata and ipv6\_ra to \_destroy\_router\_namespace * Updated from global requirements * Adds macvtap support * Remove duplicate import of constants module * Switch run-time import to using importutils.import\_module * Enable assignment-from-no-return pylint check * tox.ini: Avoid using bash where unnecessary * l2population\_rpc: docstring improvements * Fix race condition on processing DVR floating IPs * neutron-db-manage finds automatically config file * Ensure test\_agent\_manager handles random hashseeds * Ensure ofagent unit tests handles random hashseeds * Moves the HA resource creations outside of transaction * Modify docstring on send\_delete\_port\_request in N1kv plugin * Empty files should not contain copyright or license * Remove superfluous except/re-raise * Remove single occurrence of lost-exception warning * Schema enhancement to support MultiSegment Network * Remove redundant initialization and check from DVR RPC mixin * Improve performance of security group DB query * Optimize query in \_select\_dhcp\_ips\_for\_network\_ids * Updated cache module and its dependencies * Updated service.py and its dependencies * Updated fileutils and its dependencies * Cisco N1kv: Fix update network profile for add tenants * DB: Only ask for MAC instead of entire port * Only fetch port\_id from SG binding table * NSX: Make conn\_idle\_timeout configurable * nsx plugin: keep old priority when reconnecting bad connection * l3\_agent: avoid name conflict with context * Guard against concurrent port removal in DVR * Refactor l2\_pop code to pass 
mac/ip info more readably * Fix KeyError in dhcp\_rpc when plugin.port\_update raise exception * Refactor \_make\_subnet\_dict to avoid issuing unnecessary queries * openvswitch: Remove no longer used options * VPNaaS Cisco unit test clean-up * Call DVR VMARP notify outside of transaction * remove E251 exemption from pep8 check * Race for l2pop when ports go up/down on same host * Catch exceptions in router rescheduler * Minor: remove unnecessary intermediate variable * Handle unused set\_context in L3NatTestCaseMixin.floatingip\_with\_assoc * Use EUI64 for IPv6 SLAAC when subnet is specified * Arista L3 Ops is success if it is successful on one peer * Add unique constraints in IPAvailabilityRange * Remove two sets that are not referenced * Update VPN logging to use new i18n functions * mock.assert\_called\_once() is not a valid method * Check for VPN Objects when deleting interfaces * Compare subnet length as well when deleting DHCP entry * Add pylint tox environment and disable all existing warnings * Updated from global requirements * update the relative path of api\_extensions\_path * Reduce security group db calls to neutron server * Ignore top-level hidden dirs/files by default * Remove some duplicate unit tests * NSX: drop support to deprecated dist-router extension * Execute udevadm on other linux installs * Avoid constructing a RouterInfo object to get namespace name * Drop sslutils and versionutils modules * Imported Translations from Transifex * Remove an argument that is never used * Refactor \_process\_routers to handle a single router * Add Juno release milestone * Add database relationship between router and ports * Fix L2 agent does not remove unused ipset set * Add Juno release milestone * Add database relationship between router and ports * Disable PUT for IPv6 subnet attributes * Skip IPv6 Tests in the OpenContrail plugin * Remove all\_routers argument from \_process\_routers * update ml2\_migration to reflect optional methods * Disable PUT for 
IPv6 subnet attributes * Do not assume order of lvm.tun\_ofports set elements * Skip IPv6 Tests in the OpenContrail plugin * Removed kombu from requirements * Updated from global requirements * Imported Translations from Transifex * Imported Translations from Transifex * Remove two sets that are not referenced * Forbid update of HA property of routers * Forbid update of HA property of routers * Teach DHCP Agent about DVR router interfaces * Updated from global requirements * Allow reading a tenant router's external IP * Raise exception if ipv6 prefix is inappropriate for address mode * Retry getting the list of service plugins * Add missing methods to NoopFirewallDriver * Don't fail when trying to unbind a router * Modify the ProcessMonitor class to have one less config parameter * Big Switch: Don't clear hash before sync * Remove sslutils from openstack.common * Divide \_cleanup\_namespaces for easy extensibility * L3 Agent should generate ns\_name in a single place * Add comments to iptables rules to help debugging * nit : missing a "%s" in a log message * L3 agent should always use a unique CONF object * Iterate over same port\_id if more than one exists * Fix setup of Neutron core plugin in VPNaaS UT * remove openvswitch plugin * Fix pid file location to avoid I->J changes that break metadata * Don't fail when trying to unbind a router * remove linuxbridge plugin * Allow reading a tenant router's external IP * Fix sleep function call * Add admin tenant name to nova notifier * ML2: move L3 cleanup out of network transaction * Open Kilo development * ML2 Cisco Nexus MD: Fix UT to send one create vlan message * Implement ModelsMigrationsSync test from oslo.db * Imported Translations from Transifex * Update migration scripts to support DB2 * Do not assume order of report list elements * Disallow unsharing used firewall policy * Imported Translations from Transifex * Add missing methods to NoopFirewallDriver * Raise exception if ipv6 prefix is inappropriate for 
address mode * Fix broken port query in Extraroute test case * Revert "Cleanup floatingips also on router delete" * fix dvr snat bindings for external-gw-clear * Fix quota limit range validator * Remove default dictionary from function def * Fix KeyError when getting secgroup info for ports * Create DHCP port for IPv6 subnet * Deletes floating ip related connection states * Do not lookup l3-agent for floating IP if host=None, dvr issue * Remove RPC notification from transaction in create/update port * Do not assume order of body and tags elements * Remove the translation tag for debug level logs in vmware plugin * Retry getting the list of service plugins * Fix entrypoint of OneConvergencePlugin plugin * Forbid regular users to reset admin-only attrs to default values * Finish small unit test refactor of API v2 tests * Security groups: prevent race for default security group creation * Stop admin using other tenants unshared rules * Eliminate OrderedDict from test\_api\_v2.py * Mock out all RPC calls with a fixture * Add logging for enforced policy rules * Imported Translations from Transifex * Remove unnecessary \_make\_port function in BSN UTs * ofagent: Drop log level of tenant-triggerable events * Set vif\_details to reflect enable\_security\_group * Use dict\_extend\_functions to populate provider network attributes * Fix foreign key constraint error on ml2\_dvr\_port\_bindings * Some clean up of code I'm preparing to modify * Indicate the begin and end of the sync process to EOS * DVR to delete router namespaces for service ports * Do not assume order of device\_ids set elements * Fix 500 error on retrieving metadata by invalid URI * Only setup dhcp interface if dhcp is not active on network * HA routers master state now distributed amongst agents * Rework and enable VPNaaS UT for Cisco CSR REST * Update URL of Ryu official site in ofagent README files * Set dsvm-functional job to use system packages * Delete a broken subnet delete unit test * Fix to delete 
user and group association in Nuage Plugin * Deletes FIP agent gw port when last VM is deleted * Delete DB records instead of tables to speedup UT * Stop exception log in Big Switch unit tests * Separate Configuration from Freescale SDN ML2 mechanism Driver * NSX plugin: set VNIC\_TYPE port binding attribute * Access correct key for template name * ofagent: Ignore unknown l2pop entry removals * Neutron metering does not check overlap ip range * Rename workers to api\_workers and simplify code * Fix DVR to service DHCP Ports * Tunnel ID range validation for VXLAN/GRE networks * Remove @author(s) from copyright statements * BSN: Add context to backend request for debugging * Don't create unused ipset chain * Imported Translations from Transifex * Avoid an extra database query in schedule\_snat\_router * Add HA support to the l3 agent * Stop ignoring 400 errors returned by ODL * Fix a test\_db\_plugin unit test side\_effect usage * Imported Translations from Transifex * Fix KeyError on missing gw\_port\_host for L3 agent in DVR mode * Stop using intersphinx * Updated from global requirements * Cisco N1kv: Remove vmnetwork delete REST call on last port delete * Remove the Cisco Nexus monolithic plugin * L3 Metering label as shared * Check for ports in subnet before deleting it from Nuage VSD * ofagent: Fix a possible crash in arp responder * Add a new scheduler for the l3 HA * Add functional testing to ipset\_manager * Properly handle empty before/after notifications in l2pop code * Remove logic for conditional migrations * Make Juno migrations config independent * Introduce havana initial state * Adds ipset support for Security Groups * Refactor l3\_agent.process\_router\_floating\_ip\_addresses * Cleanup floatingips also on router delete * use TRUE in SQL for boolean var * Remove faulty .assert\_has\_calls([]) * Fail on None before iteration attempt * Imported Translations from Transifex * ofagent: Remove broken XenAPI support * Passing admin tenant name to EOS * Fix 
for floating ip association and deletion * BSN: Allow concurrent reads to consistency DB * Remove useless check in \_rpc\_update\_firewall * Use renamed \_fail\_second\_call() in cisco nexus tests * Add L3 VRRP HA base classes * Allow DHCPv6 reply from server to client * Don't allow user to set firewall rule with port and no protocol * Added TAP\_DEVICE\_PREFIX info to common/constants * Fix comments in api.rpc.handlers * ofagent: Clean up logging * UTs: Disable auto deletion of ports/subnets/nets * Remove second call to get\_subnets in delete\_subnet * Changes to support FWaaS in a DVR based environment * Imported Translations from Transifex * Remove hints from schedule\_router * Call unbind\_snat\_servicenode from schedule router * NSX: Correct allowed\_address\_pair return value on create\_port * Add the unit tests for ml2.rpc module * Neutron should not use the neutronclient utils module for import\_class * Add unit-test assert to check dict is superset of dict * Pythonified sanity\_check.all\_tests\_passed * Removed direct access to MessagingServer * Remove subnet\_id from check\_ports\_exist\_on\_l3agent * Add requests\_mock to test-requirements.txt * Removed kombu from requirements * Fix metadata agent's auth info caching * Throw exception instances instead of classes * Add scheduler unit tests to enable bug fixes and refactoring * Fix AttributeError when setting external gateway on DVR router * Stop tracking connections in DVR FIP Namespace * Fixes formatting for debug output in neutron/agent/l3\_agent.py * Avoid testing code duplication which introduced testing bugs * Supply missing cisco\_cfg\_agent.ini file * Reset IPv6 detection flag after IPv6 tests * Remove unused arg to config.setup\_logging() * Updated from global requirements * Revert "Skip functional l3 agent test" * Fix leftover Timeout effecting most eventlet calls * shared policy shouldn't have unshared rules * ofagent: Remove @author tags and update copyright notices * Work toward Python 3.4 
support and testing * Cleanup rename of get\_compute\_ports\_on\_host\_by\_subnet * Revert "Cisco DFA ML2 Mechanism Driver" * Refactor security group rpc call * Avoid auto-scheduling for distributed routers * Fix interface IP address for DVR with gateway * BSN: Bind external ports in ML2 driver * Remove SELECT FOR UPDATE use in delete\_firewall * Big Switch: Retry on 503 errors from backend * Remove absolute path in KillFilter for metadata-proxy * Implements sync mechanism between Neutron and Nuage VSD * ofagent: Implement physical\_interface\_mappings * ofagent: Enable local arp responder for TYPE\_LOCAL * ofagent: Enable local arp responder for TYPE\_FLAT * Implements ProcessMonitor to watch over external processes * Skip functional l3 agent test * ofagent: Local arp responder for VLAN * Prevent SystemExits when running tests * Big Switch: Separate L3 functions into L3 service * Apic drivers enhancements (second approach): Topology * Big Switch: Bind IVS ports in ML2 driver * Add functional test for IptablesManager * Clarify message when no probes are cleared * Remove reference to cisco\_cfg\_agent.ini from setup.cfg again * Fix a bug in Mellanox plugin RPC caused by secgroup RPC refactoring * Don't spawn metadata-proxy for non-isolated nets * l2pop: Allow network types overridable * ML2: Fix release of network segments to allocation pools * Fix a recent ipv6 UT regression * Imported Translations from Transifex * Add endpoint\_type parameter to MetaInterfaceDriver * Remove chain for correct router during update\_routers() * ofagent: Enable local arp responder for local VMs * ofagent: merge br-tun into br-int * Apic drivers enhancements (second approach): Sync * Apic drivers enhancements (second approach): L3 refactor * ML2 Type Driver refactor part 2 * Adds router service plugin for CSR1kv * Introduces a keepalived manager for HA * Support for extensions in ML2 * Cisco DFA ML2 Mechanism Driver * Improve some plugins help strings * Provide a quick way to run 
flake8 * Apic drivers enhancements (second approach): L2 refactor * Make SecurityGroupsRpcCallback a separate callback class * Subnets with prefix length 0 are invalid * Adding mechanism driver in ML2 plugin for Nuage Networks * Fix state\_path in tests * Add functional test for l3\_agent * remove explicit include of the ovs plugin * NSX: log request body to NSX as debug * Datacenter moid should not be tuple * Remove ovs dependency in embrane plugin * Layer 3 service plugin to support hardware based routing * Remove binding:profile update from Mellanox ML2 MD * Remove old policies from policy.json * Apic drivers enhancements (second approach): Backend * Make DvrServerRpcCallback a separate callback class * Make DhcpRpcCallback a separate callback class * Adding support of DNS nameserver and Host routes for the Nuage Plugin * Block downgrade from icehouse to havana * Use lockutils module for tox functional env * Do not use auto\_schedule\_routers to add router to agent * Fix func job hook script permission problems * Check for IPv6 file before reading * Remove SELECT FOR UPDATE use in update\_firewall * Fix l3 agent scheduling logic to avoid unwanted failures * Fix InvalidRequestError in auto\_schedule\_routers * Fix incorrect number of args to string format * Add support for provider-network extension in nuage Plugin * Make L3RpcCallback a separate callback class * Cisco VPN with in-band CSR (interim solution) * Inline "for val in [ref]" statements * Minor refactoring for add\_router\_to\_l3\_agent * Predictable iptables chains output order * Prefer "val !=/== ref" over "val (not) in [ref]" in conditions * Heal script: Drop fks before operating on columns * Fixed template of IPsecSiteConnectionNotFound message * Fix DVR to service LBaaS VIP Ports * Refactor test\_type\_gre/vxlan to reduce duplicate code * Fix heal\_script for MySQL specifics * Make log level in linux.utils.execute configurable * Imported Translations from Transifex * Networks are not scheduled to 
DHCP agents for Cisco N1KV plugin * ext-gw update on dvr router improperly handled by l3-agent * metering driver default value is different in code and config file * Fix for floatingip-delete not removing fip\_gw port * Increase the default poll duration for Cisco n1kv * Fix IpNetnsCommand to execute without root\_wrapper when no netns * Increase ovsdb\_monitor.SimpleInterfaceMonitor start timeout * Change autogenerate to be unconditional * Remove status initialization from plugin's create\_firewall * Set firewall state to CREATED when dealing with DVR * Add template attr. for subnet, router create in Nuage plugin * Implement ip\_lib.device\_exists\_with\_ip\_mac * Add \_store\_ip\_allocation method * Updated from global requirements * Refactor plugin setup helpers out of test.base * Raise proper exception in case duplicate ipv6 address is allocated * Do not explicitly set mysql\_engine * Fixes Hyper-V agent issue on Hyper-V 2008 R2 * Removing sorted() function from assertEqual() * Add hook scripts for the functional infra job * ML2 Type driver refactor part 1 * Minor refactoring of auto\_schedule\_routers * Add ipv6 forwarding for router namespaces * Refresh rpc\_backend values in unit tests to those from oslo.messaging * Add unit tests covering single operations to ODL * One Convergence: Skip all tests with 'v6' in name * VPNaaS: Enable UT cases with newer oslo.messaging * Do not log WARN messages about lack of L3 agents for DVR routers * Add specific docs build option to tox * Fix policy rules for adding and removing router interfaces * Refactor type\_tunnel/gre/vxlan to reduce duplicate code * Join tables in query for down L3 agents * Rename range to avoid shadowing the builtin * Fixes Hyper-V issue due to ML2 RPC versioning * A10 Networks LBaaS v1 Driver * Assign Cisco nw profile to multi-tenants in single request * Remove unused network parameter from \_allocate\_ips\_for\_port * corrects the typos in l3\_router\_plugin's comments * Support Stateful and 
Stateless DHCPv6 by dnsmasq * Implements securitygroup extension for nuage plugin * Fix bigswitch setup.cfg lines * Arista Layer 3 Sevice Plugin * Add config for visibility of cisco-policy-profile * Ensure ip6tables are used only if ipv6 is enabled in kernel * Remove invalid or useless initialization in test\_type\_vxlan * Fix migration set\_length\_of\_description\_field\_metering * Set InnoDB engine for all existing tables * Use oslo.db create\_engine instead of SQLAlchemy * Big Switch: Check for 'id' in port before lookup * Reorder operations in create\_vip * Send HTTP exceptions in the format expected by neutronclient * Change nexus\_dict to accept port lists * Update DVR Binding when router\_id changes * Imported Translations from Transifex * Remove auto-generation of db schema from models at startup * Cisco N1kv plugin to send subtype on network profile creation * Implement namespace cleanup for new DVR namespaces * Fix config option names in ml2\_conf\_sriov.ini * NSX: Avoid floating IP status reset * correct getLoggers to use \_\_name\_\_ in code * Skip FWaaS config mismatch check if RPC method is unsupported * NSX: lift restriction on DVR update * Updated from global requirements * Use jsonutils instead of stdlib json * Remove INACTIVE status from FWaaS * Ignore http\_proxy while connecting to test WSGI server * Fix interface add for dvr with gateway * l2pop: get\_agent\_ports: Don't yield (None, {}) * ML2: Make get\_device\_details report mac address as well * Delete DVR namespaces on node after removing last VM * Fix PortNotFound error during update\_device\_up for DVR * Option to remove routers from dead l3 agents * Remove SELECT FOR UPDATE use in ML2 tunnel driver add\_endpoint * Fix KeyError during sync\_routers * Fix PortNotFound exception during sync\_routers * VPNaaS: Cisco fix validation for GW IP * Raise NotImplementedError instead of NotImplemented * Imported Translations from Transifex * Fix duplicate function: test\_getattr\_unallowed\_attr * 
Preserve link local IP allocations for DVR fip ns across restart * Fix 404 error fetching metadata when using DVR * Raise exception for network delete with subnets presents * SecurityGroupRuleExists should point out rule id inseand of group id * Opencontrail plug-in implementation for core resources * Do not assume order of new\_peers list elements * Make plugin and l3plugin available as mixin's properties * Use call to report state when ovs\_agent starts up * add auth token to context * Fixes an issue with FIP re-association * NSX: unify the two distributed routing extensions * NSX: fix wording for configuration option * MLNX Agent: ensure removed ports get treated on resyncs * Add delete operations for the ODL MechanismDriver * Predictable field and filter ordering * Fixing neutron-db-manage with some options other than upgrade/downgrade * Removes extra indents from TestSubresourcePlugin * ofagent: Upgrade note about firewall\_driver * Return port context from \_bind\_port\_if\_needed * MLNX Agent: Process port\_update notifications in the main agent loop * Fix session's InvalidRequestError because of nested rollback * Remove unneeded device\_owner field from l2pop tuple * ofagent: Remove network\_delete method * Do not assume order of parameters in OVSBridge.add\_flow call * Fix to throw correct error code for bad attribute * Improve external gateway update handling * Do not assume order of pci slot list * DeferredBridge to allow add\_tunnel\_port passthru * Enabled Cisco ML2 driver to use new upstream ncclient * Fix to enable L2pop to serve DVR * Remove duplicated check for router connect to external net * ofagent: Add a missing normalized\_port\_name * Return 403 instead of 404 on attr policy failures * Proper validation for inserting firewall rule * Imported Translations from Transifex * Ensure assertion matches dict iter order in test * Fix 500 error during router-update for dvr routers * Simple refactor to stop passing around an unused parameter * Make 
\_build\_uri\_path output predictable * Radware: When a pip is needed, reuse the Port * Remove redundant topic from rpc calls * l3\_db: refactor L3\_NAT\_DB\_mixin * OVS flows apply concurrently using a deferred OVSBridge * Do not assume order of network\_uuid's * Big Switch: Only update hash header on success * ofagent: Stop monitoring ovsdb for port changes * ofagent: Desupport ancillary bridges * Add a tox test environment for random hashseed testing * OFAgent: Implement arp responder * Updated from global requirements * Do not assume order of quotas dictionary elements * Move Cisco VPN RESTapi URI strings to constants * Remove ignored do\_request timeout argument * Move from Python logging to Openstack logging * Imported Translations from Transifex * NSX: remove duplicate call to set\_auth\_cookie() * NSX: Correct default timeout params * Remove reference to cisco\_cfg\_agent.ini from setup.cfg * Exit Firewall Agent if config is invalid * Fix spelling mistakes * Fix DB Duplicate error when scheduling distributed routers * Imported Translations from Transifex * Make ML2 ensure\_dvr\_port\_binding more robust * centralized router is incorrectly scheduled * Fix-DVR Gateway clear doesn't delete csnat port * Fix spelling in get\_plugin\_interface docstring * Use storage engine when creating tables in migrations * Removed configobj from test requirements * Implement Midonet Juno Network Api calls * Add missing ml2 plugin to migration 1fcfc149aca4 * Replace nullable from primary keys in tz\_network\_bindings with default * Use correct section for log message if interface\_driver import fails * Make sure that gateway is in CIDR range by default * test\_l3\_plugin: L3AgentDbInteTestCase L3AgentDbSepTestCase fails * Add L3 Scheduler Changes for Distributed Routers * Pass filters in arrays in get\_agent\_gw\_ports\_exist\_for\_network * Do not schedule network when creating reserved DHCP port * Check that router info is set before calling \_update\_arp\_entry * Move ARP 
responder test to sanity command * neutron.conf does not have the definition of firewall quotas * Fix wrong order of tables in downgrade * Fix deprecated opt in haproxy driver * Race condition of L3-agent to add/remove routers * Replaced the strings with respective constants * Make dvr\_vmarp\_table\_update call conditional to dvr extension * ofagent: Update a comment in port\_bound * Updated from global requirements * Set promote\_secondaries when creating namespaces * Functional tests work fine with random PYTHONHASHSEED * Call config\_parse in base test setup * ML2 additions to support DVR * Make test\_l3\_agent.\_prepare\_router\_data a module function * Remove redundant code in tests/unit/test\_l3\_agent * Fix ML2 Plugin binding:profile update * Set python hash seed to 0 in tox.ini * Add definition for new VIF type * Configuration agent for Cisco devices * Handle bool correctly during \_extend\_extra\_router\_dict * Encapsulate some port properties in the PortContext * Changes to remove the use of mapping tables from Nuage plugin * Updated from global requirements * Log exceptions inside spawned functions * Correct misspelled variable name * Avoid RequestURITooLong exception in metadata agent * Move loadbalancer vip port creation outside of transaction * Define some abstract methods in VpnDriver class * ML2 mechanism driver for SR-IOV capable NIC based switching, Part 2 * Modify L3 Agent for Distributed Routers * Audited attribute for policy update not changing * OFAgent: Share codes of l2-population in OVS agent * This patch changes the name of directory from mech\_arista to arista * ML2 mechanism driver for SR-IOV capable NIC based switching, Part 1 * Add rule for updating network's router:external attribute * L2 Agent-side additions to support DVR * Imported Translations from Transifex * NSX: fix router ports port\_security\_enabled=False * Add partial specs support in ML2 for multiprovider extension * Add partial specs support in ML2 for gre/vxlan provider 
networks * Set nullable=False on tenant\_id in apic\_contracts table * call security\_groups\_member\_updated in port\_update * The default value of quota\_firewall\_rule should not be -1 * Correct LOG.debug use * Fix incorrect downgrade * Fix spelling mistake in the log message * Imported Translations from Transifex * Support Router Advertisement Daemon (radvd) for IPv6 * Move plugin.delete\_port call out of transaction * Add partial specs support in ML2 for vlan provider networks * ML2: Update a comment after the recent bind\_port change * NSX: fix validation logic on network gateway connect * Initialize RpcProxy objects correctly * Fix DVR regression for ofagent * RPC additions to support DVR * no quota for allowed address pair * Allow to import \_LC, \_LE, \_LI and \_LW functions directly * L2 Model additions to support DVR * Fixed audit notifications for dhcp-agent-network * Make readme reference git.openstack.org not github * Fix enums usage for postgres in migrations * Return a tuple of None's instead of one None * Fix a log typo in ML2 manager.bind\_port() * Big Switch: Remove consistency hash on full sync * VPNaaS: Separate validation for Cisco impl * VPNaaS: separate out validation logic for ref impl * VMWare: don't notify on disassociate\_floatingips() * Add L3 Extension for Distributed Routers * VPNaaS Cisco REST client enhance CSR create * Bump hacking to version 0.9.2 * Log methods using rpc communcation * Fixes port update failure when device ID is not updated * Support Quota extension in MidoNet plugin * NSX: Remove unneed call to \_ensure\_default\_security\_group * Use auth\_token from keystonemiddleware * update vsm credential correctly * Shamelessly removing commented print line * L3 agent prefers RPC messages over full sync * Dnsmasq config files syntax issue when dhcp\_domain is empty * Database healing migration * Fix incorrect default paramater in migration * Use method's logger in log decorator * Fixed audit notifications for 
l3-agent-router ops * Expand arp\_responder help text * Send network name and uuid to subnet create * Cisco: Fix test cases which make incorrect create requests * ML2: Bind ports outside transactions * Freeze models for healing migration * NSX: Optionally not enforce nat rule match length check * ofagent: Handle device name prefixes other than "tap" * Add -s option for neutron metering rules * Security groups extension for PLUMgrid plugin * Missing max\_routes in neutron.conf * Clear entries in Cisco N1KV specific tables on rollback * Allow unsharing a network used as gateway/floatingip * Change all occurences of no\_delete to do\_delete * Split up metering test case into plugin + test case * Use integer server\_default value for multicast\_ip\_index * Validate expected parameters in add/remove router interfaces * Revert "VPNaaS REST Client UT Broken" * Mock out tunnel\_sync in test to avoid sleeping * Add 'server\_default' parameter * Add BSN plugin to agent migration script * Move \_convert\_to\_nsx\_transport\_zones into nsx\_utils * Extract CommonDBMixin to a separate file * Remove dead helper function from test\_l3\_plugin * Added support for NOS version 4.1.0, 5.0.0 and greater * Remove reference to setuptools\_git * NSX: neutron router-interface-add should clear security-groups * Refactor 'if false do nothing' logic in l3 scheduler db * Imported Translations from Transifex * Add a gate-specific tox env for functional tests * NSX: remove unnecessary checks on network delete * Bump min required version for dnsmasq to 2.63 * Add CONTRIBUTING.rst * Do not mark device as processed if it wasn't * Fix 'server\_default' parameter usage in models * Fix missing migration default value * Add a link to a blog post by RedHat that discusses GRE tunnels in OVS * Updated from global requirements * VPNaaS REST Client UT Broken * Avoid notifying while inside transaction opened in delete\_port() * sync periodic\_task fix from incubator * Omit mode keyword when spawning dnsmasq 
with some ipv6 subnets * Fixed spelling mistake in securitygroups\_rpc * OVS agent: fix a comment on CANARY\_TABLE * ofagent: Fix an argument mismatch bug in commit 9d13ea88 * Fix UnboundLocalError raised during L3 router sync task * Updated from global requirements * Fix isinstance assertions * Imported Translations from Transifex * Allow setting a rootwrap cmd for functional tests * Fix OVSBridge.get\_port\_ofport to handle empty output * Ignore variable column widths in ovsdb functional tests * Add configurable http\_timeout parameter for Cisco N1K * NSX: fix indentations * BSN: Remove db lock and add missing contexts * NSX: properly handle floating ip status * Updated from global requirements * Fix example for running individual tests * Stop the dhcp-agent process when dnsmasq version is not determined * Switch to using of oslo.db * Replace occurences of 'test\_tenant' with 'test-tenant' in tests * lb-agent: ensure removed devices get treated on resyncs * Imported Translations from Transifex * Add sanity check for nova notification support * changes ovs agent to get bridges via ovs\_lib * Use correct MAX\_LEN constant in agent functional tests * remove unsupported middleware * Fix re-creation of the pool directory * Add config for performance gate job * Use patch ports to interconnect integration/physical bridges * Exit rpc\_loop when SIGTERM is recieved in ovs-agent * LBaaS new object model logging no-op driver * ofagent: Use port desc to monitor ports on br-int * Fixed dhcp & gateway ip conflict in PLUMgrid plugin * Introduce bulk calls for get device details * validate flat networks physical name * Remove \_\_init\_\_ method from TunnelCallback mixin * OVS agent: Correct bridge setup ordering * Revert "Revert "ovs-agent: Ensure integration bridge is created"" * Imported Translations from Transifex * Synced log module and its dependencies from olso-incubator * Pass newly created router to \_update\_router\_gw\_info * don't ignore rules that are already 
enforced * Updated neutron.conf to reflect new RPC options * Moved rpc\_compat.py code back into rpc.py * Updated from global requirements * Updated from global requirements * ofagent: move main module from ryu repository * Don't convert numeric protocol values to int * Imported Translations from Transifex * Revert "Check NVP router's status before deploying a service" * Remove the useless vim modelines * Imported Translations from Transifex * Changing the poll\_duration parameter type to int * Add test cases for plugins/ml2/plugin.py * Removed local modification in incubator code * Removed 'rpc' and 'notifier' incubator modules * Removed create\_rpc\_dispatcher methods * Use openstack.common.lockutils module for locks in tox functional tests * Pass serializer to oslo.messaging Notifier * Fix auto\_schedule\_networks to resist DBDuplicateEntry * Imported Translations from Transifex * Control active number of REST calls from Cisco N1kv plugin to VSM * Revert "ovs-agent: Ensure integration bridge is created" * ValueError should use '%' instead of ',' * NSX: return 400 if dscp set for trusted queue * NSX sync cache: add a flag to skip item deletion * NSX: propagate network name updates to backend * Renamed argument for create\_consumer[s] * Renamed consume\_in\_thread -> consume\_in\_threads * Renamed start\_rpc\_listener -> start\_rpc\_listeners * Port to oslo.messaging * Imported Translations from Transifex * Pass 'top' to remove\_rule so that rule matching succeeds * Big Switch: Stop watchdog on interval of 0 * Remove old quantum scripts * Move \_filter\_non\_model\_columns method to CommonDbMixin * Updated from global requirements * Ignore emacs checkpoint files * Big Switch: Lock consistency table for REST calls * Check port value when creating firewall rule with icmp protocol * Improve docstring for OVSNeutronAgent constructor * Big Switch ML2: sync detection in port-update * Imported Translations from Transifex * Remove SELECT FOR UPDATE use in ML2 type driver 
release\_segment * Add vlan type driver unittests * Make sure we call BaseTestCase.setUp() first * Don't explicitly call .stop() on mock.patch objects * Don't instantiate RPC clients on import * Configure agents using neutron.common.config.init (formerly .parse) * linuxbridge-agent: process port updates in the main loop * Notify systemd when starting Neutron server * Ensure entries in dnsmasq belong to a subnet using DHCP * Added missing core\_plugins symbolic names * Trigger provider security group update for RA * NSX: revert queue extension name change * Fix pool statistics for LBaaS Haproxy driver * Don't use root\_helper when it's not needed * Introduced rpc\_compat.create\_connection() * Copy-paste RPC Service class for backwards compatibility * Introduce RpcCallback class * Fix opt helpstring for dhcp\_lease\_duration * Consistently use jsonutils instead of specific implementation * Imported Translations from Transifex * Adding static routes data for members * remove pep8 E122 exemption and correct style * Change default netpartition behavior in nuage plugin * Add 'ip rule ...' 
support to ip\_lib * Add missing keyword raise to get\_profile\_binding function * Add logging for NSX status sync cache * Big Switch: Remove unnecessary initialization code * Big Switch: Import DB module in unit test * When l2-pop ON, clean stale ports in table0 br-tun * remove E112 hacking exemption and fix errors * Updated from global requirements * Allowed address pair: Removing check for overlap with fixed ips * NeutronManager: Remove explicit check of the existence of an attribute * Fix invalid IPv6 address used in FakeV6 variables * Improve vxlan type driver initialization performance * Floatingip extension support for nuage plugin * ovs-agent: Ensure integration bridge is created * Brocade mechanism driver depends on the brocade plugin templates * Brocade mechanism driver should be derived from ML2 plugin base class * changes ovs agent\_id init to use hostname instead of mac * multiprovidernet: fix a comment * Imported Translations from Transifex * Fix race condition with firewall deletion * extensions: remove 'check\_env' method * Check the validation of 'delay' and 'timeout' * Control update, delete for cisco-network-profile * Ensure routing key is specified in the address for a direct producer * Support Subnets that are configured by external RAs * Refactor code in update\_subnet, splitting into individual methods * Make allocation\_pools attribute of subnet updateable by PUT * Monkey patch threading module as early as possible * Introduced transition RPC exception types * Added RpcProxy class * ofagent: Fix VLAN usage for TYPE\_FLAT and TYPE\_VLAN * Big Switch: Catch exceptions in watchdog thread * Use import from six.moves to import the queue module * Start an unstarted patch in the hyperv unit tests * Imported Translations from Transifex * Fix NVP FWaaS occurs error when deleting a shared rule * Check NVP router's status before deploying a service * Add an option to turn off DF for GRE and VXLAN tunnels * Increase default metadata\_workers, backlog to 
4096 * Big Switch: Add missing data to topology sync * Replace XML with JSON for N1kv REST calls * Big Switch: Call correct method in watchdog * Freescale SDN Mechanism Driver for ML2 Plugin * OVS Agent: limit veth names to 15 chars * Added note to neutron.conf * Return no active network if the agent has not been learnt yet * Sync service module from oslo-incubator * ovs, ofagent: Remove dead code * Default to setting secure mode on the integration bridge * Cisco APIC Layer 3 Service plugin * Allow neutron-sanity-check to check OVS patch port support * Remove run-time version checking for openvswitch features * Add flat type driver unittests * Changed DictModel to dict with attribute access * Pass object to policy when finding fields to strip * Allow L3 base to handle extensions on router creation * Refactor some router-related methods * Add local type driver unittests * add engine parameter for offline migrations * Check DB scheme prior to migration to Ml2 * Removes unnecessary Embrane module-level mocks * Improve module-level mocks in midonet tests * Big Switch: fix capabilities retrieval code * Improve iptables\_manager \_modify\_rules() method * NSX: bump http\_timeout to 30 seconds * Log firewall status on delete in case of status inconsistency * BSN: Set hash header to empty instead of False * Neutron does not follow the RFC 3442 spec for DHCP * LBaaS add missing rootwrap filter for route * Radware LBaaS driver is able to flip to a secondary backend node * NSX: fix invalid docstring * NSX: fix tenant\_id passed as security\_profile\_id * NSX: Fix request\_id in api\_client to increment * Improve usage of MagicMocks in ML2 and L3 tests * Improve readability of MagicMock use in RYU test * Remove function replacement with mock patch * Remove unnecessary MagicMocks in cisco unit tests * Handle errors from run\_ofctl() when dumping flows * Sync periodic\_task from oslo-incubator * Added missing plugin .ini files to setup.cfg * Imported Translations from Transifex 
* Make linux.utils.execute log error on return codes * FWaaS plugin doesn't need to handle firewall rule del ops * Reprogram flows when ovs-vswitchd restarts * Revert "fix openvswitch requirement check" * Updated from global requirements * Fix KeyError exception while updating dhcp port * NSX: fix bug for flat provider network * Disallow regular user to update firewall's shared attribute * Support 'infinite' dhcp\_lease\_duration * l2-pop : removing a TODO for the delete port use case * NEC plugin: Bump L3RPC callback version to 1.1 * Synced jsonutils from oslo-incubator * Imported Translations from Transifex * fix openvswitch requirement check * NSX: replace strong references to the plugin with weakref ones * Fixes bugs for requests sent to SDN-VE controller * Install SNAT rules for ipv4 only * Imported Translations from Transifex * Add NVP advanced service check before deleting a router * Disallow 'timeout' in health\_monitor to be negative * Remove redundant default=None for config options * Fix for multiple misspelled words * Use list copy for events in nova notifier * Extraroute extension support for nuage plugin * OFAgent: Fixing lost vlan ids on interfaces * Set onlink routes for all subnets on an external network * Cisco APIC ML2 mechanism driver, part 2 * Remove all mostly untranslated PO files * remove token from notifier middleware * NSX: get rid of the last Nicira/NVP bits * Metadata agent caches networks for routers * Common decorator for caching methods * Make pid file locking non-blocking * Allowed Addresspairs: Removing check for overlap with fixed ips * Do not defer IPTables apply in firewall path * Metaclass Python 3.x Compatibility * Fix non-existent 'assert' calls to mocks * Log iptables rules when they fail to apply * Remove hard dependency on novaclient * Provide way to reserve dhcp port during failovers * Imported Translations from Transifex * Implement local ARP responder onto OVS agent * Fix typos in ovs\_neutron\_agent.py * Allow vlan type 
usage for OpenDaylight ml2 * NSX: do not raise on missing router during migration step * NSX: fix error when creating VM ports on subnets without dhcp * NSX: allow net-migration only in combined mode * OFAgent: Avoid processing ports which are not yet ready * Add missing translation support * Reorg table ml2\_port\_bindings when db migration * Remove unused parameter * NSX: Do a single query for all gateway devices * Add mailmap entry * Add 'secret' property for 'connection' option * NSX: Do not extend fault map for network gateway ext * Ensure tenant owns devices when creating a gateway * Corrected the syntax of port\_update call to NVSD agent * Fix some typos in neutron/db and IBM SDN-VE plugin * Fix issubclass() hook behavior in PluginInterface * Imported Translations from Transifex * LBaaS VIP doesn't work after delete and re-add * OVS lib defer apply doesn't handle concurrency * Big Switch: Don't use MagicMocks unnecessarily * Make plugin deallocation check optional * Restore GARP by default for floating IPs * Ensure core plugin deallocation after every test * Updated from global requirements * Big Switch: Check source\_address attribute exists * Revert "Big Switch: Check source\_address attribute exists" * ML2 VxlanTypeDriver: Synchronize of VxlanAllocation table * Start ping listener also for postgresql * ofagent: Add a missing push\_vlan action * NSX: ensure that no LSN is created on external networks * Make VPNaaS 'InUse' exception more clear * Remove explicit dependency on amqplib * Revert "Disable debug messages when running unit tests" * eswitch\_neutron\_agent: Whitespace fixes in comments * Upgrade failure for DB2 at ml2\_binding\_vif\_details * Remove duplicate module-rgx line in .pylintrc * Disable debug messages when running unit tests * Perform policy checks only once on list responses * Allow DHCPv6 solicit from VM * Fix importing module in test\_netscaler\_driver * Record and log reason for dhcp agent resync * Big Switch: Check source\_address 
attribute exists * L3 RPC loop could delete a router on concurrent update * Adding tenant-id while creating Radware ADC service * Fix H302 violations * Fix H302 violations in plugins package * Fix H302 violations in unit tests * Imported Translations from Transifex * lbaas on a network without gateway * Optimize querying for security groups * NSX: pass the right argument during metadata setup * Improve help strings for radware LbaaS driver * Fix network profile subtype validation in N1kv plugin * Performance improvement of router routes operations * Add support to dynamically upload drivers in PLUMgrid plugin * Imported Translations from Transifex * Reference new get\_engine() method from wsgi.py * Allow test\_l3\_agent unit test to run individually * tests/unit: refactor reading neutron.conf.test * Don't print duplicate messages on SystemExit * Unit test cases for quota\_db.py * Cisco VPN device driver - support IPSec connection updates * OVS and OF Agents: Create updated\_ports attribute before setup\_rpc * Imported Translations from Transifex * Updated from global requirements * Synced jsonutils from oslo-incubator * Imported Translations from Transifex * NSX: fix migration for networks without a subnet * Allow ML2 plugin test cases to be run independently * Removed signing\_dir from neutron.conf * Add physical\_network to binding:vif\_details dictionary * Database exception causes UnboundLocalError in linuxbridge-agent * Wrong key router.interface reported by ceilometer * Imported Translations from Transifex * NSX: fix API payloads for dhcp/metadata setup * Improve ODL ML2 Exception Handling * NSX: change api mapping for Service Cluster to Edge Cluster * Fix protocol value for SG IPV6 RA rule * Cisco APIC ML2 mechanism driver, part 1 * LBaaS: remove orphan haproxy instances on agent start * Fixed floating IP logic in PLUMgrid plugin * Segregate the VSM calls from database calls in N1kv plugin * NSX: add nsx switch lookup to dhcp and metadata operations * Use 
set\_gateway from ip\_lib * Fix incorrect usage of sa.String() type * Re-submit "ML2 plugin should not delete ports on subnet deletion" * LBaaS: Set correct nullable parameter for agent\_id * Vmware: Set correct nullable for lsn\_id, nsx\_port\_id * IBM: set secret=True on passwd config field * Restore ability to run functional tests with run\_tests.sh * Fix H302 violations in extensions package * Sync db code from oslo-incubator * Imported Translations from Transifex * Remove List events API from Cisco N1kv Neutron * NSX: Fix fake\_api\_client to raise NotFound * Replace loopingcall in notifier with a delayed send * ip-lib : use "ip neigh replace" instead of "ip neigh add" * Add 2-leg configuration to Radware LBaaS Driver * Fix H302 violations in db package and services * Cisco: Set correct nullable for switch\_ip, instance\_id, vlan\_id * Ml2: Set correct nullable for admin\_state\_up * Drop service\* tables only if they exist * Updated from global requirements * Make help texts more descriptive in Metaplugin * ML2 Cisco Nexus MD: Improve Unit Test Coverage * Fix migration that breaks Grenade jobs * Fix incorrect change of Enum type * allow delete\_port to work when there are multiple floating ips * Add nova\_ca\_certificates\_file option to neutron * gw\_port should be set as lazy='join' * netaddr<=0.7.10 raises ValueError instead of AddrFormatError * Imported Translations from Transifex * netaddr<=0.7.10 raises ValueError instead of AddrFormatError * Validate IPv6 modes in API when IP version is 4 * Add 'ip neigh' to ip\_lib * OFAgent: Improve handling of security group updates * OFAgent: Process port\_update notifications in the main agent loop * NSX: sync thread catches wrong exceptions on not found * Notifier: Catch NotFound error from nova * Switch over to FixedIntervalLoopingCall * Check if bridge exists and make sure it's UP in ensure\_bridge * Validate CIDR given as ip-prefix in security-group-rule-create * Support enhancements to Cisco CSR VPN REST APIs 
* Fix uninitialized variable reference * Nuage Plugin: Delete router requires precommit checks * Delete DHCP port without DHCP server on a net node * Improved quota error message * Remove device\_exists in LinuxBridgeManager * Add support for multiple RPC workers under Metaplugin * Security Group rule validation for ICMP rules * Fix Metering doesn't respect the l3 agent binding * DHCP agent should check interface is UP before adding route * Remove workaround for bug #1219530 * Fix LBaaS Haproxy occurs error if no member is added * Add functional tests to verify ovs\_lib VXLAN detection * Add nova\_api\_insecure flag to neutron * Allow combined certificate/key files for SSL * Verify ML2 type driver exists before calling del * Fix dangling patches in Cisco and Midonet tests * Make default nova\_url use a version * ML2 Cisco Nexus MD: Remove unnecessary Cisco nexus DB * NSX plugin: fix get\_gateway\_devices * Exclude .ropeproject from flake8 checks * Register LBaaS resources to quotas engine * Remove mock.patch.stop from tests that inherit from BaseTestCase * Reschedule router if new external gateway is on other network * Update ensure()/reconnect() to catch MessagingError * Properly apply column default in migration pool\_monitor\_status * Remove "reuse\_existing" from setup method in dhcp.py * Enable flake8 E711 and E712 checking * Fixes Hyper-V agent security groups disabling * Fixes Hyper-V agent security group ICMP rules * Fix typo in ml2 configuration file * Edge firewall: improve exception handling * Edge driver: Improve exception handling * Fix typo in comment * NSX: Fix KeyError in sync if nsx\_router\_id not found * VMware: log backend port creation in the right place * Revert "Hide ipv6 subnet API attributes" * BigSwitch: Create router ports synchronously * NSX: ensure dhcp port is setup on metadata network * Hide ipv6 subnet API attributes * Set correct columns' length * Enforce required config params for ODL driver * Add L2 Agent side handling for non 
consistent security\_group settings * BSN: Remove module-level ref to httplib method * BigSwitch: Stop HTTP patch before overriding * Typographical correction of Arista ML2 help * Fix wrong section name "security\_group" in sample config files * Set the log level to debug for loading extensions * Updated from global requirements * set api.extensions logging to ERROR in unit tests * Add common base class for agent functional tests * Remove RPC to plugin when dhcp sets default route * Imported Translations from Transifex * Add missing comma in nsx router mappings migration * OFAgent: Avoid re-wiring ports unnecessarily * BigSwitch: Improves server manager UT coverage * BigSwitch: Don't import portbindings\_db until use * lb-agent: fix get\_interfaces\_on\_bridge returning None * Clean out namespaces even if we don't delete namespaces * Call policy.init() once per API request * ofa\_neutron\_agent: Fix \_phys\_br\_block\_untranslated\_traffic * Don't emit log for missing attribute check policy * Sync service and systemd modules from oslo-incubator * Imported Translations from Transifex * Move bash whitelisting to pep8 testenv * Fix test MAC addresses to be valid * ML2: ODL driver sets port status * Add a note that rpc\_workers option is experimental * Fix Jenkins translation jobs * Redundant SG rule create calls in unit tests * Set ns\_name in RouterInfo as attribute * Replace HTTPSConnection in NEC plugin * ignore build directory for pep8 * Imported Translations from Transifex * Delete routers that are requested but not reported as active * Explicitly import state\_path opt in tests.base * fixes tests using called\_once\_ without assert * Remove invalid copyright headers under API module * update doc string - correct typo * Revert changes removing OVSBridge return * fixes broken neutron-netns-cleanup * Remove duplicated tests for check\_ovs\_vxlan\_version * Permit ICMPv6 RAs only from known routers * Return 409 for second firewall creation * OFA agent: use 
hexadecimal IP address in tunnel port name * Fixing Arista CLI command * use floatingip's ID as key instead of itself * Use a temp dir for CONF.state\_path * Use os.uname() instead of calling uname in subprocess * Enable hacking H301 check * Stop using portbindings\_db in BSN ML2 driver * NSX: Fix pagination support * Removing vim header lines * Fix function parsing the kernel version * Updated from global requirements * Restore NOT NULL constraint lost by earlier migrations * BigSwitch: Semaphore on port status update * Remove last parts of Quantum compatibility shim * Imported Translations from Transifex * Fix quota\_health\_monitor opt name in neutron.conf * Add missing DB migrations for BSN ML2 plugin * Only send notifications on uuid device\_id's * Add Icehouse no-op migration * Add support for https requests on nova metadata * Delete disassociated floating ips on external network deletion * Imported Translations from Transifex * Invoke \_process\_l3\_create within plugin session * Invalid ovs-agent test case - test\_fdb\_add\_flows * Add missing parameters for port creation * Move test\_ovs\_lib to tests/unit/agent/linux * Update BigSwitch Name to its correct name * Cancelling thread start while unit tests running * Delete duplicate external devices in router namespace * Deals with fails in update\_\*\_postcommit ops * ML2 Cisco Nexus MD: Support portchannel interfaces * Changed the message line of RouterInUse class * UT: do not hide an original error in test resource ctxtmgr * BigSwitch: Move attr ref after error check * Fix namespace exist() method * Make dnsmasq aware of all names * Open Juno development * Prevent cross plugging router ports from other tenants * Adds OVS\_HYBRID\_PLUG flag to portbindings * Disable XML tests on Py26 * Subnets should be set as lazy='join' * nec plugin: allow to delete resource with ERROR status * Synced rpc and gettextutils modules from oslo-incubator * Import request\_id middleware bug fix from oslo * Add unit test for 
add\_vxlan in test\_linux\_ip\_lib * Start using oslosphinx theme for docs * Migrate data from cap\_port\_filter to vif\_details * Imported Translations from Transifex * Include cisco plugin in migration plugins with ovs * ML2 Cisco Nexus MD: Remove workaround for bug 1276395 * Fixed TypeError when creating MlnxException * Replace a usage of the deprecated root\_helper option * Cisco VPN driver correct reporting for admin state chg * Add script to migrate ovs or lb db to ml2 db * Correct OVS VXLAN version check * LBaaS: make device driver decide whether to deploy instance * NSX plugin: return 400 for invalid gw certificate * Imported Translations from Transifex * Remove extra space in help string * Add enable\_security\_group to BigSwitch and OneConvergence ini files * Add nec plugin to allowed address pairs migration * Imported Translations from Transifex * Fix segment allocation tables in Cisco N1kv plugin * Updated from global requirements * NEC plugin: Rename quantum\_id column to neutron\_id * Log received pool.status * NEC plugin: Allow to add prefix to OFC REST URL * NEC plugin: Remove a colon from binding:profile key due to XML problem * rename ACTIVE\_PENDING to ACTIVE\_PENDING\_STATUSES * VPNaaS support for VPN service admin state change and reporting * Use save\_and\_reraise\_exception when reraise exception * Return meaningful error message on pool creation error * Don't set priority when calling mod\_flow * Avoid creating FixedIntervalLoopingCall in agent UT * Imported Translations from Transifex * Big Switch Plugin: No REST port delete on net del * Add enable\_security\_group option * Get rid of additional db contention on fetching VIP * Fix typo in lbaas agent exception message * De-duplicate unit tests for ports in Big Switch * ML2: Remove validate\_port\_binding() and unbind\_port() * Imported Translations from Transifex * Fix duplicate name of NVP LBaaS objs not allowed on vShield Edge * tests/unit: clean up notification driver * Use different 
name for the same constraint * Add a semaphore to some ML2 operations * Log dnsmasq host file generation * add HEAD sentinel file that contains migration revision * Added config value help text in ns metadata proxy * Fix usage of save\_and\_reraise\_exception * Cisco VPN device driver post-merge cleanup * Fixes the Hyper-V agent individual ports metrics * Sync excutils from oslo * BigSwitch ML2: Include bound\_segment in port * NEC plugin: Honor Retry-After response from OFC * Add update binding:profile with physical\_network * return false or true according to binding result * Enable to select an RPC handling plugin under Metaplugin * Ensure to count firewalls in target tenant * Mock agent RPC for FWaaS tests to delete DB objs * Allow CIDRs with non-zero masked portions * Cisco plugin fails with ParseError no elem found * Cisco Nexus: maximum recursion error in ConnectionContext.\_\_del\_\_ * Don't use root to list namespaces * Fixes Hyper-V agent security groups enable issue * ML2 BigSwitch: Don't modify parent context * Advanced Services documentation * LBaaS: small cleanup in agent device driver interface * Change report\_interval from 4 to 30, agent\_down\_time from 9 to 75 * Stop removing ip allocations on port delete * Imported Translations from Transifex * Ignore PortNotFound exceptions on lockless delete * Show neutron API request body with debug enabled * Add session persistence support for NVP advanced LBaaS * Fix misleading error message about failed dhcp notifications * NSX: Fix router-interface-delete returns 404 when router not in nsx * Fix \_validate\_mac\_address method * BigSwitch: Watchdog thread start after servers * Calculate stateless IPv6 address * Create new IPv6 attributes for Subnets * Remove individual cfg.CONF.resets from tests * BigSwitch: Sync workaround for port del deadlock * NSX: Ensure gateway devices are usable after upgrade * Correctly inherit \_\_table\_args\_\_ from parent class * Process ICMP type for iptables firewall * 
Imported Translations from Transifex * Added missing l3\_update call in update\_network * ML2 plugin involves in agent\_scheduler migration * Imported Translations from Transifex * Avoid long transaction in plugin.delete\_ports() * cisco: Do not change supported\_extension\_aliases directly * Fix KeyError except on router\_info in FW Agent * NSX: remove last of unneed quantum references * NSX: fix intermetting UT failure on vshield test\_router\_create * Bugfix and refactoring for ovs\_lib flow methods * Send fdb remove message when a port is migrated * Imported Translations from Transifex * Send network-changed notifications to nova * Notify nova when ports are ready * Skip radware failing test for now * NSX: Propagate name updates for security profiles * Fix in admin\_state\_up check function * NSX: lower the severity of messages about VIF's on external networks * Kill 'Skipping unknown group key: firewall\_driver' log trace * Imported Translations from Transifex * API layer documentation * BigSwitch: Use eventlet.sleep in watchdog * Embrane LBaaS Driver * BigSwitch: Widen range of HTTPExceptions caught * Fix ml2 & nec plugins for allowedaddresspairs tests * Fix unittest failure in radware lbaas driver * Removes calls to mock.patch.stopall in unit tests * Stop mock patches by default in base test class * Query for port before calling l3plugin.disassociate\_floatingips() * Optimize floating IP status update * NSX: Allow multiple references to same gw device * VPNaaS Device Driver for Cisco CSR * Updated from global requirements * BigSwitch: Fix certificate file helper functions * Create agents table when ML2 core\_plugin is used * Fix usage of sqlalchemy type Integer * Fixing lost vlan ids on interfaces * Fix bug:range() is not same in py3.x and py2.x * Call target plugin out of DB transaction in the Metaplugin * NSX: Sync do not pass around model object * NSX: Make replication mode configurable * Updated from global requirements * Fix ml2 db migration of 
subnetroutes table * Imported Translations from Transifex * After bulk create send DHCP notification * Fix lack of extended port's attributes in Metaplugin * Add missing ondelete option to Cisco N1kv tables * Migration support for Mellanox Neutron plugin * Imported Translations from Transifex * Imported Translations from Transifex * Updated from global requirements * Add support for tenant-provided NSX gateways devices * NSX: fix nonsensical log trace on update port * BigSwitch: Fix rest call in consistency watchdog * BigSwitch: Fix cfg.Error format in exception * BigSwitch: Fix error for server config check * Fixed Spelling error in Readme * Adds state reporting to SDN-VE agent * Fix unittest failure in radware lbaas driver * Log configuration values for OFA agent * NSX: Add ability to retry on 503's returned by the controller * Cisco Neutron plugin fails DB migration * Floatingip\_status migration not including Embrane's plugin * One Convergence Neutron Plugin l3 ext support * Nuage plugin was missed in floatingip\_status db migration script * ML2 Cisco Nexus MD: VM migration support * Drop old nvp extension file * Makes the Extension loader behavior predictable * One Convergence Neutron Plugin Implementation * NEC plugin: delete old OFC ID mapping tables * Imported Translations from Transifex * Fix typo in migration script * Enhance GET networks performance of metaplugin * Adds the missing migration for gw\_ext\_mode * BigSwitch: Add SSL Certificate Validation * BigSwitch: Auto re-sync on backend inconsistencies * VPNaaS Service Driver for Cisco CSR * Updated from global requirements * Add OpenDaylight ML2 MechanismDriver * Replaces network:\* strings by constants * Check vxlan enablement via modinfo * Do fip\_status migration only for l3-capable plugins * Fix race condition in update\_floatingip\_statuses * Implementaion of Mechanism driver for Brocade VDX cluster of switches * NSX: passing wrong security\_group id mapping to nsx backend * Avoid unnecessarily 
checking the existence of a device * Refactor netns.execute so that it is not necessary to check namespace * Minor refactoring for Hyper-V utils and tests * Adds Hyper-V Security Groups implementation * Rename migration lb\_stats\_needs\_bigint to match revision number * Imported Translations from Transifex * NVP LBaaS: check for association before deleting health monitor * Different class names for VPNaaS migrations * ML2: database needs to be initalized after drivers loaded * replace rest of q\_exc to n\_exc in code base * Adds multiple RPC worker processes to neutron server * NEC plugin: PFC packet fitler support * Fix NVP/Nicira nits * Remove unused method update\_fixed\_ip\_lease\_expiration * NSX: nicira\_models should import model\_base directly * NSX: make sync backend run more often * Embrane Plugin fails alembic migrations * Implement Mellanox ML2 MechanismDriver * Use database session from the context in N1kv plugin * Delete subnet fails if assoc port has IPs from another subnet * Remove nvplib and move utility methods into nsxlib * BigSwitch: Add address pair support to plugin * Remove unused 'as e' in exception blocks * Remove vim line from db migartion template * Imported Translations from Transifex * Support advanced NVP IPsec VPN Service * Improves Arista's ML2 driver's sync performance * Fix NVP FWaaS errors when creating firewall without policy * Remove call to addCleanup(cfg.CONF.reset) * nec plugin: Avoid long transaction in delete\_ports * Avoid using "raise" to reraise with modified exception * Imported Translations from Transifex * Implement OpenFlow Agent mechanism driver * Finish off rebranding of the Nicira NVP plugin * Log configuration values for OVS agent * BigSwitch: Asynchronous rest calls for port create * Introduce status for floating IPs * BigSwitch: Add agent to support neutron sec groups * N1kv: Fixes fields argument not None * Adds the new IBM SDN-VE plugin * Imported Translations from Transifex * Nuage Networks Plugin * Fixes 
spelling error Closes-Bug: #1284257 * Openvswitch update\_port should return updated port info * Updated from global requirements * Remove unused variable * Change firewall to DOWN when admin state down * ovs-agent: use hexadecimal IP address in tunnel port name * NSX: add missing space 'routeron' * Imported Translations from Transifex * Fix DetachedInstanceError for Agent instance * Update License Headers to replace Nicira with VMware * Renaming plugin-specific exceptions to match NSX * Imported Translations from Transifex * DB Mappings for NSX security groups * NSX: port status must reflect fabric, not link status * Typo/grammar fixes for the example neutron config file * NSX: Pass NSX uuid when plugging l2 gw attachment * stats table needs columns to be bigint * Remove import extension dep from db migration * Fix get\_vif\_port\_by\_id to only return relevant ports * Developer documentation * Fix NSX migration path * ML2 mechanism driver access to binding details * Add user-supplied arguments in log\_handler * Imported Translations from Transifex * NSX: Fix newly created port's status should be DOWN * BigSwitch: Stop using external locks * Rename/refactoring of NVP api client to NSX * Remove pyudev dependency * Rename DB models and related resources for VMware NSX plugin * Lower log level of errors due to user requests to INFO * Include proper Content-Type in the HTTP response headers * LBaaS: check for associations before deleting health monitor * l2-population/lb/vxlan : ip neigh add command failed * l2-population : send flooding entries when the last port goes down * tests/service: consolidate setUp/tearDown logic * Ensure ovsdb-client is stopped when OVS agent dies * NSX: Fix status sync with correct mappings * Support Port Binding Extension in Cisco N1kv plugin * change Openstack to OpenStack in neutron * ML2 binding:profile port attribute * Rename/remove Nicira NVP references from VMware NSX unit tests * Fix webob.exc.HTTPForbidden parameter miss * Sync 
oslo cache with oslo-incubator * Change tenant network type usage for IB Fabric * options: consolidate options definitions * Replace binding:capabilities with binding:vif\_details * Make sure dnsmasq can distinguish IPv6 address from MAC address * Rename Neutron core/service plugins for VMware NSX * Make metaplugin be used with a router service plugin * Fix wrap target in iptables\_manager * BigSwitch: Fix tenant\_id for shared net requests * BigSwitch: Use backend floating IP endpoint * Updated from global requirements * Imported Translations from Transifex * Raise max header size to accommodate large tokens * NSX: get\_port\_status passed wrong id for network * Imported Translations from Transifex * Reset API naming scheme for VMware NSX plugin * remove pointless test TestN1kvNonDbTest * Rename Security Groups related methods for VMware NSX plugin * Rename L2 Switch/Gateway related methods for VMware NSX plugin * Rename Router related methods for VMware NSX plugin * Plugins should call \_\_init\_\_ of db\_base\_plugin for db.configure * Fixes Tempest XML test failures for Cisco N1kv plugin * Fixes broken documentation hyperlinks * Use "!=" instead of "is not" when comparing two values * ML2/vxlan/test: remove unnecessary self.addCleanup(cfg.CONF.reset) * Fix test\_db\_plugin.test\_delete\_port * Handle racing condition in OFC port deletion * Imported Translations from Transifex * Adds https support for metadata agent * Fix VPN agent does not handle multiple connections per vpn service * Don't require passing in port\_security=False if security\_groups present * wsgi.run\_server no longer used * Use different context for each API request in unit tests * Sync minimum requirements * Implements an LBaaS driver for NetScaler devices * vshield task manager: abort tasks in stop() on termination * Copy cache package from oslo-incubator * BigSwitch: Move config and REST to diff modules * Implements provider network support in PLUMgrid plugin * Should specify 
expect\_errors=False for success response * Fix unshortened IPv6 address caused DHCP crash * Add support to request vnic type on port * tests/unit: Initialize core plugin in TestL3GwModeMixin * Revert "Skip a test for nicira service plugin" * Improve unit test coverage for Cisco plugin model code * Imported Translations from Transifex * Fix class name typo in test\_db\_rpc\_base * Embrane Tempest Compliance * ipt\_mgr.ipv6 written in the wrong ipt\_mgr.ipv4 * Update help message of flag 'enable\_isolated\_metadata' * Imported Translations from Transifex * Fix invalid facilities documented in rootwrap.conf * Reset the policy after loading extensions * Fix typo in service\_drivers.ipsec * Validate rule uuids provided for update\_policy * Add update from agent to plugin on device up * Remove dependent module py3kcompat * Delete duplicate internal devices in router namespace * Use six.StringIO/BytesIO instead of StringIO.StringIO * Parse JSON in ovs\_lib.get\_vif\_port\_by\_id * Imported Translations from Transifex * Skip a test for nicira service plugin * Remove DEBUG:....nsx\_cluster:Attribute is empty or null * Fix request timeout errors during calls to NSX controller * remove unused imports * L3 agent fetches the external network id once * Avoid processing ports which are not yet ready * Ensure that session is rolled back on bulk creates * Add DB mappings with NSX logical routers * Use save\_and\_reraise\_exception when reraise exception * nec plugin: Compare OFS datapath\_id as hex int * Use six.moves.urllib.parse instead of urlparse * Rename Queue related methods for VMware NSX plugin * Lowercase OVS sample config section headers * Add DB mappings with NSX logical switches * NSX: Fix possible deadlock in sync code * Raise an error from ovs\_lib list operations * Add additional unit tests for the ML2 plugin * Fix ValueError in ip\_lib.IpRouteCommand.get\_gateway() * Imported Translations from Transifex * Fix log-related tracebacks in nsx plugin * add router\_id to 
response for CRU on fw/vip objs * Move db migration of ml2 security groups to havana * Sync latest oslo.db code into neutron * Add support for router scheduling in Cisco N1kv Plugin * Imported Translations from Transifex * Add migration support from agent to NSX dhcp/metadata services * Validate multicast ip range in Cisco N1kv Plugin * NSX plugin: fix floatingip re-association * Re-enable lazy translation * Do not append to messages with + * Remove psutil dependency * Remove legacy quantum config path * LBaaS: move agent based driver files into a separate dir * mailmap: update .mailmap * Fix binding:host\_id is set to None when port update * Return request-id in API response * Skip extra logging when devices is empty * Add extraroute\_db support for Cisco N1kv Plugin * Improve handling of security group updates * ML2 plugin cannot raise NoResultFound exception * Fix typo in rootwrap files: neuton -> neutron * Imported Translations from Transifex * Prepare for multiple cisco ML2 mech drivers * ML2 Cisco Nexus MD: Create pre/post DB event handlers * Support building wheels (PEP-427) * NVP plugin:fix delete sec group when backend is out of sync * Use oslo.rootwrap library instead of local copy * Fix misspellings in neutron * Remove unnecessary call to get\_dhcp\_port from DeviceManager * Refactor to remove \_recycle\_ip * Allow multiple DNS forwarders for dnsmasq * Fix passing keystone token to neutronclient instance * Don't document non-existing flag '--hide-elapsed' * Fix race condition in network scheduling to dhcp agent * add quota support for ryu plugin * Imported Translations from Transifex * Enables BigSwitch/Restproxy ML2 VLAN driver * Add and update subnet properties in Cisco N1kv plugin * Fix error message typo * Configure floating IPs addresses after NAT rules * Add an explicit tox job for functional tests * improve UT coverage for nicira\_db operations * Avoid re-wiring ports unnecessarily * Process port\_update notifications in the main agent loop * Base 
ML2 bulk support on the loaded drivers * Imported Translations from Transifex * Removes an incorrect and unnecessary return * Reassign IP to vlan interface when deleting a VLAN bridge * Imported Translations from Transifex * Change metadata-agent to have a configurable backlog * Sync with commit-id: 9d529dd324d234d7aeaa3e6b4d3ab961f177e2ed * Remove unused RPC calls from n1kv plugin code * Change metadata-agent to spawn multiple workers * Extending quota support for neutron LBaaS entities * Tweak version nvp/nsx version validation logic for router operations * Simplify ip allocation/recycling to relieve db pressure * Remove unused code * Reduce severity of log messages in validation methods * Disallow non-admin users update net's shared attribute * Fix error while connecting to busy NSX L2 Gateway * Remove extra network scheduling from vmware nsx plugin * L3 Agent restart causes network outage * Remove garbage in vim header * Enable hacking H233 rule * Rename nvp\_cluster for VMware NSX plugin * Minimize the cost of checking for api worker exit * Remove and recreate interface if already exists * Use an independent iptables lock per namespace * Report proper error message in PLUMgrid Plugin * Fix interprocess locks for run\_tests.sh * Clean up ML2 Manager * Expunge session contents between plugin requests * Remove release\_lease from the DHCP driver interface * VMware NSX: add sanity checks for NSX cluster backend * Update RPC code from oslo * Fix the migration adding a UC to agents table * Configure plugins by name * Fix negative unit test for sec group rules * NVP: Add LOG.exception to see why router was not created * Add binding:host\_id when creating port for probe * Fix race condition in delete\_port method. 
Fix update\_port method * Use information from the dnsmasq hosts file to call dhcp\_release * Fix pip install failure due to missing nvp.ini file * Imported Translations from Transifex * Imported Translations from Transifex * Make timeout for ovs-vsctl configurable * Remove extra whitespace * Fix extension description and remove unused exception * Fix mistake in usage drop\_constraint parameters * Fix race condition on ml2 delete and update port methods * Fix Migration 50e86cb2637a and 38335592a0dc * L3 Agent can handle many external networks * Update lockutils and fixture in openstack.common * Add test to port\_security to test with security\_groups * LBaaS: handle NotFound exceptions in update\_status callback * VMware NSX: Fix db integrity error on dhcp port operations * Use base.BaseTestCase in NVP config test * Remove plugin\_name\_v2 and extension\_manager in test\_config * Enables quota extension on BigSwitch plugin * Add security groups tables for ML2 plugin via migration * Rename nicira configuration elements to match new naming structure * Fix race in get\_network(s) in OVS plugin * Imported Translations from Transifex * Fix empty network deletion in db\_base\_plugin for postgresql * Remove unused imports * nicira: fix db integrity error during port deletion * Rename check\_nvp\_config utility tool * Remove redundant codes * Remove dupl. for get\_resources in adv. 
services * Start of new developer documentation * Fix NoSuchOptError in lbaas agent test * Corrects broken format strings in check\_i18n.py * [ML2] l2-pop MD handle multi create/delete ports * Dnsmasq uses all agent IPs as nameservers * Imported Translations from Transifex * BigSwitch: Fixes floating IP backend updates * neutron-rootwrap-xen-dom0 handles data from stdin * Remove FWaaS Noop driver as default and move to unit tests dir * Send DHCP notifications regardless of agent status * Mock looping\_call in metadata agent tests * Imported Translations from Transifex * Change default eswitchd port to avoid conflict * Midonet plugin: Fix source NAT * Add support for NSX/NVP Metadata services * Update the descriptions for the log cfg opts * Add VXLAN example to ovs\_neutron\_plugin.ini * Imported Translations from Transifex * ml2/type\_gre: Adds missing clear\_db to test\_type\_gre.py * ml2: gre, vxlan type driver can leak segment\_id * NVP: propagate net-gw update to backend * Imported Translations from Transifex * Nicira: Fix core\_plugin path and update default values in README * Include lswitch id in NSX plugin port mappings * Imported Translations from Transifex * Revert "move rpc\_setup to the last step of \_\_init\_\_" * extra\_dhcp\_opt add checks for empty strings * LBaaS: synchronize haproxy deploy/undeploy\_instance methods * NVP plugin: Do backend router delete out from db transaction * NVP plugin: Avoid timeouts if creating routers in parallel * Updates tox.ini to use new features * LBaaS: fix handling pending create/update members and health monitors * Add X-Tenant-ID to metadata request * Do not trigger agent notification if bindings do not change * fix --excluded of meter-label-rule-create is not working * move rpc\_setup to the last step of \_\_init\_\_ * Updated from global requirements * Sync global requirements to pin sphinx to sphinx>=1.1.2,<1.2 * Update common network type consts to same origin * Remove start index 0 in range() * LBaaS: unify 
haproxy-on-host plugin driver and agent * change variable name from plugin into agent * Imported Translations from Transifex * Add post-mortem debug option for tests * validate if the router has external gateway interface set * Remove root\_helper config from plugin ini * Fix a race condition in agents status update code * Add LeastRouters Scheduler to Neutron L3 Agent * Imported Translations from Transifex * Imported Translations from Transifex * Remove dead code \_arp\_spoofing\_rule() * Add fwaas\_driver.ini to setup.cfg * Switch to using spawn to properly treat errors during sync\_state * Fix a typo in log exception in the metering agent * Sync rpc fix from oslo-incubator * Do not concatenate localized strings * Imported Translations from Transifex * Removed erronus config file comment * Fix str2dict and dict2str's incorrect behavior * Improve unit test coverage for Cisco plugin common code * Change to improve dhcp-agent sync\_state * Fix downgrade in migration * Sync dhcp\_agent.ini with the codes * Imported Translations from Transifex * Handle failures on update\_dhcp\_port * Handle exceptions on create\_dhcp\_port * Imported Translations from Transifex * Add vpnaas and debug filters to setup.cfg * Fix misspells * Fix bad call in port\_update in linuxbridge agent * atomically setup ovs ports * Adds id in update\_floatingip API in PLUMgrid plugin driver * Sync Log Levels from OSLO * update error msg for invalid state to update vpn resources * Add missing quota flags in the config file sample * Imported Translations from Transifex * Fix unable to add allow all IPv4/6 security group rule * Add request timeout handling for Mellanox Neutron Agent * Revert "ML2 plugin should not delete ports on subnet deletion" * Improve OVS agent logging for profiling * l3\_agent: make process\_router more robust * Fixes missing method in Hyper-V Utils (Metering) * Fix metering iptables driver doesn't read root\_helper param * Updates .gitignore * Stop logging unnecessary warning 
on context create * Avoid loading policy when processing rpc requests * Improve unit test coverage for Cisco plugin base code * Pass in certain ICMPv6 types by default * Ensure NVP API connection port is always an integer * Mocking ryu plugin notifier in ryu plugin test * Rebind security groups only when they're updated * Fix format errors seen in rpc logging * Add test\_handle\_router\_snat\_rules\_add\_rules * Rebind allowed address pairs only if they changed * Enforce unique constraint on neutron pool members * Send only one agent notification on port update * Fix showing nonexistent NetworkGateway throws 500 instead of 404 * Imported Translations from Transifex * Update Zhenguo Niu's mailmap * Improve unit test coverage for Cisco plugin nexus code * Preserve floating ips when initializing l3 gateway interface * Fwaas can't run in operating system without namespace feature * Imported Translations from Transifex * metaplugin: use correct parameter to call neutron client * Replace stubout with fixtures * Imported Translations from Transifex * Imported Translations from Transifex * Mock the udevadm in the TunnelTestWithMTU test * Avoid dhcp agent race condition on subnet and network delete * Sync openstack.common.local from oslo * Imported Translations from Transifex * ML2 plugin should not delete ports on subnet deletion * Add state reporting to the metadata agent * Move MidonetInterfaceDriver and use mm-ctl * Do not add DHCP info to subnet if DHCP is disabled * Handle IPAddressGenerationFailure during get\_dhcp\_port * Add request-id to log messages * Imported Translations from Transifex * Enable polling minimization * Add configurable ovsdb monitor respawn interval * Ensure get\_pid\_to\_kill works with rootwrap script * Adds tests, fixes Radware LBaaS driver as a result * Optionally delete namespaces when they are no longer needed * Call \_destroy\_metadata\_proxy from \_destroy\_router\_namespaces * Added check on plugin.supported\_extension\_aliases * Cisco 
nexus plugin fails to untrunk vlan if other hosts using vlan * Catch PortNotFound exception during get\_dhcp\_port * Reduce the severity of dhcp related log traces * MidoNet: Added support for the admin\_state\_up flag * Fix OVS agent reclaims local VLAN * Replace mox in unit tests with mock * LBaaS: fix reported binary name of a loadbalancer agent * Apply six for metaclass * NVP plugin:fix connectivity to fip from internal nw * Imported Translations from Transifex * Add support for NSX/NVP DHCP services * Fix downgrade in migration * Imported Translations from Transifex * Add log statements for policy check failures * Lower severity of log trace for DB integrity error * Adds delete of a extra\_dhcp\_opt on a port * Round-robin SVI switch selection fails on Cisco Nexus plugin * Tune up report and downtime intervals for l2 agent * Fix DB integrity issues when using postgres * Move Loadbalancer Noop driver to the unit tests * Removes unused nvp plugin config param * Midonet to support port association at floating IP creation * Arista ML2 mech driver cleanup and integration with portbindings * Fix MeteringLabel model to not clear router's tenant id on deletion * Fix downgrade in migration * Fix sqlalchemy DateTime type usage * Linux device name can have '@' or ':' characters * Remove the warning for Scheduling Network * Do not run "ovs-ofctl add-flow" with an invalid in\_port * Replace a non-existing exception * Fix random unit-test failure for NVP advanced plugin * Updated from global requirements * Cleanup HACKING.rst * Remove confusing comment and code for LBaaS * Don't shadow str * ExtraRoute: fix \_get\_extra\_routes\_by\_router\_id() * remove repeated network type definition in cisco plugin * Refactor configuring of floating ips on a router * Remove database section from plugin.ini * Fix import log\_handler error with publish\_errors set * DHCP agent scheduler support for BigSwitch plugin * Fix segment range in N1KV test to remove overlap * Fix query error on 
dhcp release port for postgresql * sync log from oslo * Imported Translations from Transifex * Use correct device\_manager member in dhcp driver * LBaaS UT: use constants vs magic numbers for http error codes * Modified configuration group name to lowercase * Avoid dhcp agent race condition on subnet and network delete * Ensure OVS plugin is loaded in OVS plugin test * Remove deprecated fields in keystone auth middleware * Fix error while creating l2 gateway services in nvp * Fix update\_device\_up method of linuxbridge plugin * LBaaS: Fix incorrect pool status change * Imported Translations from Transifex * NVP: Correct NVP router port mac to match neutron * Updated from global requirements * Removing workflows from the Radware driver code * LBaaS: when returning VIP include session\_persistence even if None * Imported Translations from Transifex * change assertEquals to assertEqual * Fix TypeError: kill doesn't make sense * Update latest OSLO * Revert back to 'call' for agent reports * Imported Translations from Transifex * Imported Translations from Transifex * Fixing the syntax error in the XML Serializer * Raise VipExists exception in case Vip is created or updated for a pool that already has a Vip * Imported Translations from Transifex * NVP metadata access - create elevated context once * Fix race condition in dhcp agent * adding parameter to configure QueuePool in SQLAlchemy * Fix issues with db pooling * use the fact that empty sequences are false * Ensure that lockfile are defined in a common place * Imported Translations from Transifex * Fix typo in policy.json and checks in nicira plugin * Fix DB query returning ready devices in LoadBalancerCallbacks * Imported Translations from Transifex * Load all the necessary database tables when running cisco plugin * Fix haproxy cfg unit test * fix mis-placed paren in log statement for l3-scheduler * Imported Translations from Transifex * Add bulking support for Cisco plugin * Validate protocol when creating VIP * 
Allow tests in TestDhcpAgentEventHandler run independently * Add scheduling support for the Brocade plugin * Imported Translations from Transifex * Synchronize QuantumManager.get\_instance() method * Imported Translations from Transifex * Imported Translations from Transifex * Pin SQLAlchemy to 0.7.x * Improve test coverage for quantum wsgi module * Adds delete-orphan to database deletion * Imported Translations from Transifex * Do not disable propagate on root logger * NVP metadata access - create elevated context once * Registers root\_helper option for test\_iptables\_firewall * Resolves ryu plugin unittest errors * Set fake rpc implementation in test\_lb\_quantum\_agent * Ensure DB pooling code works with newer eventlet versions * Imported Translations from Transifex * Sync latest Oslo components for updated copyright * drop rfc.sh * Replace "OpenStack LLC" with "OpenStack Foundation" * sync Oslo Grizzly stable branch with Quantum * First havana commit * Ensure port get works when NVP mapping not stored in Quantum DB * remove references to netstack in setup.py * Imported Translations from Transifex * port\_security migration does not migrate data * Adds Grizzly migration revision * Switch to final 1.1.0 oslo.config release * Fix detection of deleted networks in DHCP agent * Add l3 db migration for plugins which did not support in folsom * Updates latest OSLO changes * Set fake rpc backend impl for TestLinuxBridgeAgent * Imported Translations from Transifex * Update oslo rpc libraries * Sets default MySql engine to InnoDB * Solve branch in migration path * Fixes Hyper-V agent issue with mixed network types * Imported Translations from Transifex * missing - in --config-file * Fix typo * Log the configuration options for metadata-proxy and agent * Imported Translations from Transifex * NVP plugin: return 409 if wrong router interface info on remove * Imported Translations from Transifex * Ensure metadata access network does not prevent router deletion * Filter out 
router ports without IPs when gathering router sync data * Do not delete subnets with IPs on router interfaces * Update to Quantum Client 2.2.0 * Add explicit egress rules to nvp security profile * Update tox.ini to support RHEL 6.x * Fix exception typo * Disable secgroup extension when Noop Firewall driver is used * Wrap quota controller with resource.Resource * Allow probe-create to specify device\_owner * Enable handling the report\_state RPC call in Brocade Plugin * Imported Translations from Transifex * Create quantum client for each api request in metadata agent * Lock tables for update on allocation/deletion * NVP plugin: configure metadata network only if overlapping IPs are enabled * Show default configuration Quotas * add ns-metadata-proxy rootwrap filters to dhcp.filters * isolated network metadata does not work with nvp plugin * Imported Translations from Transifex * Load quota resources dynamically * Notify creation or deletion of dhcp port for security group * fix mis-matched kwargs for a few calls to NvpPluginException * Populate default explicit allow rules for egress * Switch to oslo.config * Moved the configuration variables * Make run\_tests.sh pep8 conf match tox * Fix syntax error in credential.py and missing \_\_init\_\_.py * Imported Translations from Transifex * Add common test base class to hold common things * fix incorrect pathname * Prevent DoS through XML entity expansion * Delete DATABASE option checkup testcases * Fixes linuxbridge agent downs with tap device deletion timing issue * Rename source\_(group\_id/ip\_prefix) to remote\_(group\_id/ip\_prefix) * Imported Translations from Transifex * Setup device alias by device flavor information * L3 port delete prevention: do not raise if no IP on port * Pin pep8 to 1.3.3 * Avoid sending names longer than 40 character to NVP * move cisco-specific extensions to Cisco extensions directory * Add UT for LBaaS HAProxy driver * Include health monitors expected codes upper bound into HAProxy 
config * Allow DHCP and L3 agents to choose if they should report state * Imported Translations from Transifex * Enable HA proxy to work with fedora * Prevent exception with VIP deletion * Change the default l3\_agent\_manager to L3NATAgent * Imported Translations from Transifex * NEC plugin support for dhcp network and router scheduling * enable linuxbridge for agent scheduler * Move network schedule to first port creation * Imported Translations from Transifex * Host route to metadata server with Bigswitch/Floodlight Plugin * Incorrect argument in calling post\_json * fix update\_port to get tenant\_id from db rather than request * Ensure max length of iptables chain name w/o prefix is up to 11 chars * Cisco plugin support for creating ports without instances * mock quantum.agent.common.config.setup\_logging * Imported Translations from Transifex * Add initial testr support * Replace direct tempfile usage with a fixture * Set fake rpc implementation in metaplugin test configuration * Enabled add gateway to refrain from checking exit code * Add stats reporting to HAProxy namespace driver * Add session persistence support to LBaaS HAProxy driver * Remove deprecated assertEquals alias * LBaaS Agent Reference Implementation * Imported Translations from Transifex * create a Quantum port to reserve VIP address * NVP plugin support for dhcp network scheduling * Bump python-quantumclient version to 2.1.2 * Add scheduling feature basing on agent management extension * Remove compat cfg wrapper * NVP Router: Do no perfom SNAT on E-W traffic * Enable multiple L3 GW services on NVP plugin * Fix retrieval of shared networks * Imported Translations from Transifex * Remove network type validation from provider networks extension * Fix NVP plugin not notifying metadata access network to DHCP agent * Limit amount of fixed ips per port * Fetch all pages when listing NVP Nat Rules * Unpin PasteDeploy dependency version * Make sure all db accesses use subtransaction * Use testtools 
instead of unittest or unittest2 * Port update with existing ip\_address only causes exception * Enables packetfilter ext in NEC plugin based on its driver config * Set default api\_extensions\_path for NEC plugin * Fixes import reorder nits * Imported Translations from Transifex * Latest common updates * Limit chain name to 28 characters * Add midonet to setup.py * Add password secret to brocade plugin * Use db model hook to filter external network * Add default state\_path to quantum.conf * Imported Translations from Transifex * Imported Translations from Transifex * refactor LoadBalancerPluginDbTestCase setUp() * Imported Translations from Transifex * Remove external\_id and security group proxy code * Add pagination parameters for extension extraroute * Imported Translations from Transifex * Provide a default api\_extensions\_path for nvp\_plugin * AttributeError: No such RPC function 'report\_state' * Add pagination support for xml * Sync latest install\_venv\_common.py with olso * Imported Translations from Transifex * Add check-nvp-config utility * Close file descriptors when executing sub-processes * Add support Quantum Security Groups for Ryu plugin * Resolve branches in db migration scripts to G-3 release * Add Quantum support for NVP Layer-2 gateways * Implement MidoNet Quantum Plugin * Routing table configuration support on L3 * Correct permissions on quantum-hyperv-agent * Raising error if invalid attribute passed in * Support Port Binding Extension in BigSwitch plugin * Exit if DHCP agent interface\_driver is not defined * Supporting pagination in api v2.0 * Update latest OSLO files * Modify dhcp agent for agent management extension * Imported Translations from Transifex * Metadata support for NVP plugin * Add routed-service-insertion * plugin/nec: Make sure resources on OFC is globally unique * Fix SG interface to reflect the reality * Add unit test for ryu-agent * Agent management extension * Need to pass port['port'] to 
\_get\_tenant\_id\_for\_create() * Improve error handling when nvp and quantum are out of sync * Decouple helper functions from L3NatDBTestCase * Imported Translations from Transifex * Add Migration for nvp-qos extension * Use oslo-config-2013.1b3 * Shorten the DHCP default resync\_interval * Add nvp qos extension * Imported Translations from Transifex * Unable to update port as non-admin nvp plugin * Update nvplib to use HTTP constants * Rename admin\_status\_up to admin\_state\_up * Fixed the typo of loadbalancer test case * Allow nicira plugin to handle multiple NVP API versions * Imported Translations from Transifex * L3 API support for BigSwitch-FloodLight Plugin * Add an update option to run\_tests.sh * Avoid extra query when overlapping IPs are disabled * Allow tests from test\_dhcp\_agent run independently * Imported Translations from Transifex * Mark password config options with secret * Adds Brocade Plugin implementation * Add support for extended attributes for extension resources * Imported Translations from Transifex * Support iptables-based security group in NEC plugin * Persist updated expiration time * Support advanced validation of dictionaries in the API * Synchronize code from oslo * Add check for subnet update with conflict gateway and allocation\_pools * Alembic migration script for Loadbalancing service * Fix NVP L3 gateway ports admin\_state\_down on creation * Remove cfg option default value and check if missing * Remove duplicated option state\_path from netns cleanup * only destroy single namespace if router\_id is set * Use AssertEqual instead of AssertTrue * Imported Translations from Transifex * Move auth\_token configurations to quantum.conf * L3 API support for nicira plugin * Unused methods in quantum.wsgi clean up * Add firewall\_driver option to linuxbridge\_conf.ini * Adds API parameters to quantum.api.extension.ResourceExtension * fix grammar in NetworkInUse exception * Imported Translations from Transifex * PLUMgrid quantum 
plugin * Implements quantum security groups support on OVS plugin * Sync latest cfg from oslo-incubator * Improvements to API validation logic * Imported Translations from Transifex * add non-routed subnet metadata support * Imported Translations from Transifex * Enable OVS and NETNS utilities to perform logging * Add unit tests for Open vSwitch Quantum plugin * Add NVP Security group support * Fix import error in ryu-agent * Imported Translations from Transifex * Bad translation from network types to nvp transport types * Update .coveragerc * Register root\_helper in test\_debug\_commands and test\_dhcp\_agent * Adds xml support for quantum v2 API * Allow tools/install\_venv\_common.py to be run from within the source directory * Cisco plugin cleanup follow up commit * Be smarter when figuring out broadcast address * Use policy\_file parameter in quantum.policy * Imported Translations from Transifex * Define root\_helper variable under the [AGENT] section * Fixes rest of "not in" usage * Updated to latest oslo-version code * Imported Translations from Transifex * Imported Translations from Transifex * Imported Translations from Transifex * Resetting session persisnence for a VIP * Improve data access method of ryu-agent * Fixes 'not in' operator usage * Imported Translations from Transifex * Adds support of TCP protocol for LBaaS VIPs * Sync latest cfg from oslo-incubator * Remove redunant key list generation in Cisco plugin * Fixes if statement inefficiency in quantum.agent.linux.interface * Imported Translations from Transifex * Postgresql ENUM type requires a name exceptions NVP Plugin * correct spelling of Notify in classname * Disable dhcp\_domain distribution when dhcp\_domain is empty * Make protocol and ethertype case insensitive for security groups * Fix branch in db migration scripts * Finish adding help strings to all config options in Quantum code * Add NVP port security implementation * Imported Translations from Transifex * Set default lock\_path in 
state\_path * Use install\_venv\_common.py from oslo * Make get\_security\_groups() return security group rules * Fix OVSQuantumAgent.port\_update if not admin\_state\_up * Clean up test\_extensions.py imports * Fixes import order errors * OVS cleanup utility removes veth pairs * Revert "Reqd. core\_plugin for plugin agents & show cfg opts loaded." * Reqd. core\_plugin for plugin agents & show cfg opts loaded * Ensure that correct root helper is used * Fix InvalidContentType can't be raised because of error in constructor * OVS: update status according to admin\_state\_up * Cisco plugin cleanup * Improving code reuse with loadbalancer entity deletion * Fix database reconnection * Fixes per tenant quota doesn't work * Adds port security api extension and base class * LinuxBridge: set port status as 'DOWN' on creation * LinuxBridge: update status according to admin\_state\_up * Use babel to generate translation file * LBaaS plugin returns unnecessary information for PING and TCP health monitors * Fix all extension contract classes inherit from extensions.ExtensionDescriptor * get\_security\_group() now returns rules * set allocation\_pool\_id nullable=False * make IPv6 unit test work on systems with eth0 * Support Port Binding Extension in NEC plugin * Enable NEC OpenFlow plugin to use per-tenant quota * Enhance wsgi to listen on ipv6 address * Fix i18n messages * Update Oslo rpc * Enforces generic sqlalchemy types in migrations * Remove redudant code * Removes redundant code in quantum.api.api\_common * Fix i18n messages in quantum.api.api\_common * Completes unittest coverage of quantum.api.api\_common * Enable test\_agent\_ovs\_cleanup to be run alone * Fix i18n messages for cisco plugin * Provide atomic database access for ports in linuxbridge plugin * Add help strings to config file options in Quantum code * Document that code is on github now in README * Config lockutils to use a temp path for tests * Fix downgrade revision to make db migration linear * Send 
notification on router interface create/delete * More unittests for quantum.api.v2.base * Fixes inefficiency in quantum.api.v2.base.\_filters * Refactor hyperv plugin and agent * Update Oslo rpc module * Provide atomic database access nvp plugin * \_validate\_security\_groups\_on\_port was not validating external\_ids * Update WebOb version to >=1.2 * Ensure that agents also set control\_exchange * Add a common test case for Port Binding Extension * Fix line endings from CRLF to LF * Fixes import order nits * Fix ATTR\_NOT\_SPECIFIED comparison errors * Add migration for network bindings in NVP plugin * NEC OpenFlow plugin supports L3 agent RPC * Update latest OSLO * Catch up RPC context fixes on NEC OpenFlow plugin * ensure all enums in loadbalancer models have names * Adding multi switch support to the Cisco Nexus plugin * Name the securitygrouprules.direction enum * Adds support for deploying Quantum on Windows * Adds a Hyper-V Quantum plugin * Add exception validation for subnet used * Remove accessing cfg.CONF.DATABASE in nec-agent * Inform a client if Quantum provides port filtering feature * Remove unsused imports in the plugins package * DHCP agent unable to access port when restarting * Remove unused imports in unit tests * Use default\_notification\_level when notification * Latest OSLO updates * NvpPluginException mixes err\_msg and err\_desc * Fixes i18n messages in nvp plugin * Optimize if/else logic in quantum.api.v2.base.prepare\_request\_body() * Fixes quantum.api.v2.base.\_filters to be more intuitive * Fix for loadbalancer vips list * rename port attribute variable to SECURITYGROUPS from SECURITYGROUP * Remove relative imports from NVP plugin * Port to argparse based cfg * Fix database configuration of ryu-agent * Pass X-Forwarded-For header to Nova * The change implemented Lbaas CRUD Sqlalchemy operations * Iptables security group implementation for LinuxBridge * Update the migration template's default kwargs * add migration support for lb 
security groups * Fix import for quantum-db-manage * Allow nvp\_api to load balance requests * API extension and DB support for service types * Add migration support to Quantum * Remove some unused imports * Undo change to require WebOb 1.2.3, instead, require only >=1.0.8 * Add common support for database configuration * Fixup import syntax error in unit test * Enable the user to enforce validity of the gateway IP * Add comment to indicate bridge names' length * refactor QuotaV2 import to match to other exts * change xxx\_metadata\_agent() into xxx\_metadata\_proxy() * Fix the replacement placeholder in string * Ensure that exception prints UUID and not pointer * .gitignore cleanup * Fixes i18n message for nec plugin * Fixes i18n message for ryu plugin * Remove unused imports in debug package * sql\_dbpool\_enabled not passed to configured\_db nvp\_plugin * Enable tenants to set non-owned ext network as router gateway * Upgrade WebOb to 1.2.3 * Logging module cleanup * Remove unused imports in common package * Remove unused imports in rootwrap package * Remove unused imports in db package * Remove unused imports in api package * Provider network implementation for NVP plugin * Remove unused imports in agent package * Set default core\_plugin to None * Ensure that exception prints correct text * Cleans up bulk\_body generation in quantum.api.v2.base.prepare\_request\_body() * Exceptions cleanup * Readjust try/catch block in quantum.api.v2.base.create() * Ensures that the dnsmasq configuration file flag is always set * Ensure allocation pools are deleted from database * Raise InvalidInput directly instead of catch it * Ensure bulk creations have quota validations * Correct exception output for subnet deletion when port is used * Update the configuration help for the OVS cleanup utility * Implementing string representation for model classes * Provide "atomic" database access for networks * Add OVS cleanup utility * Removes redundant code in 
quantum.api.v2.base.create() * Add eventlet db\_pool use for mysql * Clean up executable modules * Fixes import order nits * Fix log message for unreferenced variable * The patch introduces an API extension for LBaaS service * Fix pep8 issues * Add tox artifacts to .gitignore * Correct i18n messages for bigswitch plugin * dhcp\_agent.ini, l3\_agent.ini: update dhcp/l3\_agent.ini * Make patch-tun and patch-int configurable * Update test\_router\_list to validate the router returned * Fixed the security group port binding should be automatically deleted when delete\_port * Add restproxy.ini to config\_path in setup.py * Replaces assertEquals to assertEqual * Completes coverage of quantum.api.v2.resource * Fixed the unit tests using SQLite do not check foreign keys * dhcp.filters needs ovs\_vsctl permission * Correct i18n message for nicira plugin * Correct i18n message for metaplugin * add parent/sub-resource support into Quantum API framework * plugins/ryu: l3 agent rpc for Ryu plugin is broken * pluins/ryu: Fixes context exception in Ryu plugin * DRY for network() and subnet() in test\_db\_plugin.py * Adds validity checks for ethertype and protocol * Add script for checking i18n message * Update evenlet monkey patch flags * Remove unnecessary port deletion * Support to reset dnsname\_servers and host\_routes to empty * Prevent unnecessary database read by l3 agent * Correct i18n message for linuxbridge plugin * Add router testcases that missing in L3NatDBTestCase * Releasing resources of context manager functions if exceptions occur * Drop duplicated port\_id check in remove\_router\_interface() * Returns more appropriate error when address pool is exhausted * Add VIF binding extensions * Sort router testcases as group for L3NatDBTestCase * Refactor resources listing testcase for test\_db\_plugin.py * l3 agent rpc * Fix rootwrap cfg for src installed metadata proxy * Add metadata\_agent.ini to config\_path in setup.py * add state\_path sample back to l3\_agent.ini 
file * plugin/ryu: make live-migration work with Ryu plugin * Remove \_\_init\_\_.py from bin/ and tools/ * Removes unused code in quantum.common * Fixes import order nits * update state\_path default to be the same value * Use /usr/bin/ for the metadata proxy in l3.filters * prevent deletion of router interface if it is needed by a floating ip * Completes coverage of quantum.agent.linux.utils * Fixes Rpc related exception in NVP plugin * make the DHCP agent use a unique queue name * Fixes Context exception in BigSwitch/FloodLight Plugin * fix remap of floating-ip within l3-agent polling interval * Completes coverage of quantum.agent.rpc.py * Completes coverage of quantum.agent.netns\_cleanup.py * add metadata proxy support for Quantum Networks * Make signing dir a subdir in /var/lib/quantum * Use openstack.common.logging in NEC OpenFlow plugin * Correct i18n message for api and db module * Fixes update router gateway successful with existed floatingip association * Fixes order of route entries * fix so cisco plugin db model to not override count methods * Use auth\_token middleware in keystoneclient * Fixes pep8 nit * Make sure we can update when there is no gateway port linked to it * Fix syntax error in nvplib * Removes quantum.tests.test\_api\_v2.\_uuid() * Add filters for quantum-debug * Removing unnecessary setUp()/tearDown() in SecurityGroupsTestCase * Fix exception when security group rule already exists * Don't force run\_tests.sh pep8 only to use -N * Correct i18n message for ovs plugin * Replaces uuid.uuid4 with uuidutils.generate\_uuid() * Correct i18n message * Removes \_validate\_boolean() * Removes quantum.common.utils.str\_uuid() * Refactors quantum.api.v2.attributes.py * Updates tearDown() to release instance objects * pass static to argv to quantum-debug config parser * Improve openvswitch and linuxbridge agents' parsing of mappings * Move extension.py into quantum/api * Ensure that the expiration time for leased IP is updated correctly * Fix 
context problem * bug 1057844: improve floating-ip association checks * fix broken logic of only using hasattr to check for get\_x\_counts * Prevent router being deleted if it is used by a floating IP * Updates clear\_db() to unregister models and close session * The change allows loading several service plugins along with core plugin * fix incorrect kwarg param name for region with l3-agent * All egress traffic allowed by default should be implied * Fix unitest test\_router\_list with wrong fake return value * Delete floating port and floatingip in the same transaction * Completes unittest coverage of quantum.api.v2.attributes.py * Use DB count to get resource counts * plugin/ryu, linux/interface: remove ryu specific interface driver * Allow NVP plugin to use per-tenant quota extension * Revert "Put gw\_port into router dict result." * Ensure that deleted gateway IP address is recycled correctly * Ensure that fixed port IP address is in valid allocation range * RESTProxy Plugin for Floodlight and BigSwitch * Ensure that mac address is set to namespace side veth end * plugin/ryu: update for ryu update * plugin/ryu: add tunnel support * Adds tests for attribute.\_validate\_uuid * Adds tests to attribute.convert\_to\_int * Adds tests for attributes.is\_attr\_set * Adds test scripts for \_validate\_string * Adds test scripts for \_validate\_range * Part of the patch set that enables VM's to use libvirts bridge type * Remove qpid configuration variables no longer supported * Removing unsed code for Cisco Quantum Plugin V1 * Add QUANTUM\_ prefix for env used by quantum-debug * Make tox.ini run pep8 checks on bin * Explicitly include versioninfo in tarball * Adds test scripts for \_validate\_values * Clean up quantum.api.v2.validators * Add indication when quantum server started * Import lockutils and fileutils from openstack-common * Update latest openstack-common code * Clean up executable modules * Remove nova code from Quantum Cisco Plugin * Use isinstance for 
\_validate\_boolean * Fixes convert\_to\_boolean logic * Updated openstack-common setup and version code * Validate L3 inputs * Treat case when pid is None * Fix openssl zombies * Ensure that the anyjson version is correct * Add eventlet\_backdoor and threadgroup from openstack-common * Add loopingcall from openstack-common * Added service from openstack-common * Sync latest notifier changes from openstack-common * Update KillFilter to handle 'deleted' exe's * Pep8 fixes for quantum master * Use \_validate\_uuid in quantum.plugins.nec.extensions.packetfilter.py * Use is\_uuid\_like in quantum.extensions.securitygroup.py * Removes regex validation of UUIDs in dhcp\_agent * Use uuidutils.is\_uuid\_like in quantum.extentions.l3 * Implements \_validate\_uuid * Use uuidutils for uuid validation * Drop lxml dependency * Testcase of listing collection shouldn't depend on default order of db query * Add uuidutils module * Log loaded extension messages as INFO not WARNING * db\_base\_plugin\_v2.QuantumDbPluginV2.create\_port clean-up * Clean-up comments in quantum/db/l3\_db.py * Import order clean-up * let metaplugin work with plugin which has not l3 extension support * Ensure that HTTP 400 codes are returned for invalid input * Use openstack common log to do logging * Put gw\_port into router dict result * Add check for cidr overrapping for adding external gateway * Fix unnecessary logging messages during tests * support 'send\_arp\_for\_ha' option in l3\_agent * pin sqlalchemy to 0.7 * Remove unused metaplugin agents * Get subnets of router interfaces with an elevated context * Support external network in probe-create * remove unused modules for linuxbridge/ovs plugin agent * Chmod agent/linux/iptables\_manager.py * Quantum Security Groups API * Make create\_floatingip support transaction * Update policies * Notify about router and floating IP usages * Fix exception when port status is updated with linux bridge plugin * Call iptables without absolute path * Delete the 
child object via setting the parent's attribute to None * Add unit tests for the ovs quantum agent * Add MTU support to Linux bridge * Correct Intended Audience * Add OpenStack trove classifier for PyPI * use object directly instead of the foreigh key to update master db object * Remove database access from agents * Fix database clear when table does not exist * IP subnet validation fixes * Update default base database to be V2 * Update common * add test for create subnet with default gateway and conflict allocation pool * Logging indicates when service starts and terminates * Ensures port is not created when database exception occurs * Improve unit test times * Add control\_exchange option to common/config.py * Treat invalid namespace call * get\_network in nvp plugin didn't return subnet information * tests/unit/ryu/test\_ryu\_db: db failure * correct nvplib to update device\_id * Update rpc and notifier libs from openstack.common * Add quantum-usage-audit * Fix filters default value in get\_networks * l3\_nat\_agent was renamed to l3\_agent and this was missed * Update vif driver of Ryu plugin * Support for several HA RabbitMQ servers * Correct the error message in the Class NoNetworkAvailable * Fix flag name for l3 agent external network id * clean notification options in quantum.conf * Add log setting options into quantum.conf * Warn about use of overlapping ips in config file * Do global CIDR check if overlapping IPs disabled * Fix rootwrap filter for dnsmasq when no namespace is used * Add common popen support to the cisco plugin * Use sqlite db on file for unit tests * Uses a common subprocess popen function * remove default value of local\_ip in OVS agent * Remove a function that is not used * all rootwrap filter for 'route', used by l3-agent * l3-agent: move check if ext-net bridge exists within daemon loop * Add catch-call try/catch within rpc\_loop in ovs plugin agent * Fix OVS and LB plugins' VLAN allocation table synchronization * ZMQ fixes for 
Quantum from openstack-common * Restore SIGPIPE default action for subprocesses * Fix for flat network creation in Cisco plugin * Removes test desription that is no longer valid * Modified code Pyflakes warning * Fix deadlock of Metaplugin * remove unittest section for nec plugin README file * remove unittest section for ryu plugin README file * Fix for DB error in the Cisco plugin * modify the wrong phy\_brs into phys\_brs * NVP plugin missing dhcp rpc callbacks * make README point to real v2 API spec * README file changes for Cisco plugin * fix for nested rootwrap checks with 'ip netns exec' * always push down metadata rules for router, not just if gateway exists * Removed eval of unchecked strings * Update NVP plugin to Quantum v2 * ovs-lib: make db\_get\_map return empty dict on error * Update l3-agent.ini with missing configuration flags * Sync a change to rpc from openstack-common * Fix for failing network operations in Cisco plugin * add missing files from setup.py * Add quantum-nec-agent to bin directory * remove not need shebang line in quantum debug * make rootwrap filters path consistent with other openstack project * Bump version to 2013.1, open Grizzly * Fix lack of L3 support of NEC OpenFlow plugin * Add a new interface driver OVSVethInterfaceDriver * Ensure that l3 agent does not crash on restart * make subnets attribute of a network read-only * Exclude openstack-common from pep8 test * Ensures that the Linux Bridge Plugin runs with L3 agent * Remove an external port when an error occurs during FIP creation * Remove the exeception handler since it makes no sense * Add enable\_tunneling openvswitch configuration variable * Create .mailmap file * Update default policy for add/remove router interface to admin\_or\_owner * Add periodic check resync check to DHCP agent * Update metaplugin with l3 extension update * Add DHCP RPC API support to NEC OpenFlow plugin * Remove an external interface when router-gateway is removed * openvswitch plugin does not 
remove inbound unicast flow in br-tun * Remove default name for DHCP port * Added policy checks for add interface and remove interface * allow multiple l3-agents to run, each with one external gateway net * Prevent floating-ip and ex-gateway ports should prevent net deletion * fix generation of exception for mismatched floating ip tenant-ids * Give better error to client on server 500 error * Change 422 error to 400 error * Add IP version check for IP address fields * Policies for external networks * Add IP commands to rootwrap fileter for OVS agent * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Fix broken L3 support of Ryu plugin * check subnet overlapping when adding interface to router * add local network type and use by default for tenant networks * Fix data passed to policy engine on update * remove incorrect mock assert\_called in unit tests * Fix dhcp agent rpc exception handling * Add missing include for logging when log\_config is used * Modified code Pyflakes warning * Modified code pyflakes warning * Improve error message when flat network already exists * Lower webob dep from v1.2.0 to v1.0.8 * Allocation pool creation should check if gateway is in subnet * Make sure floating IPs + gateways must be on external nets * restart dnsmasq when subnet cidr set changes * supress dhcp router opt for subnets with null gw * add rootwrap filters to wrap ip netns exec * Implements agent for Quantum Networking testing * Quantum dhcp crashes if no networks exist * Update with latest code from openstack-common (stable/folsom) * Fixes undefined variable 'network\_type' in OVS agent * Create utility to clean-up netns * Fix lack of L3 support of Ryu plugin * Ensure that port update set correct tag in OVS * ovs\_lib unable to 
parse return when port == -1 * L3: make use of namespaces by agent configurable * Fix error in rule for metadata server dnat * Fix programming error of ryu-plugin * Ensure network delete is handled by OVS agent * Implement L3 support in Metaplugin * Fixes agent problem with RPC * netns commands should always run in the root ns * Add lease expiration management to ip recycling * misc L3 fixes * expose openvswitch GRE tunnel\_id via provider API * Do not transfer ips if there isn't any * prevent invalid deletion of ports using by L3 devices * Modified code PEP8 warning * Implementation of 2nd phase of provider extension for openswitch * Mangle network namespace name used by dhcp\_agent * Update rootwrap; track changes in nova/cinder * remove policy check for host\_routes in update\_port * Ensure proper validation for l3 API attributes * Cisco nexus sub-plugin update\_network fix * Fix dhcp option distribution by dnsmasq * fix bug where network owned resources block delete * Plugin aware extensions should also be reset at each test setup * Ensure network connectivity for linuxbridge flat network * Execute unit tests for Cisco plugin with Quantum tests * prevent OVS + LB plugins from clearing device\_id and device\_owner * updated outdated comments in base v2 plugin class * clear db.\_ENGINE for each plugin init in Metaplugin * Enable tox to run OVS plugin unit tests * Allow tox to run plugin specific unit tests * fixes cisco nexus plugin delete network issue * Fix Metainterface driver with namespace * Add lease expiration script support for dnsmasq * Remove 'verbose' API capability * PEP8 issues fixed * removed some unused global variable * Update TESTING file * Typo fix in quantum: existant => existent * Add DHCP RPC API support to Ryu plugin * Run core unit tests for each plugin * OVS plugin tunnel bridges never learn * Add nosehtmloutput as a test dependency * fix typo in OVS plugin from recent bugfix * enable router deletion logic in l3-agent * Enable users to 
list subnets on shared networks * Fix IP allocation on shared networks ports * Move metaplugin test for common test directory * Enable DHCP agent to work with plugin when L2 agents use DB polling * fix associating a floating IP during floating IP creation * Ensure that LB agent does not terminate if interface already exists in bridge * Treat exceptions when invoking ovs-vsctl * Remove v1.0 and v1.1 API from version info * Get OVS port details from port ID * Fix undefined variables * Fixing unit test failures in Cisco plugin * fix netns delete so that it works when a ns is set * Linuxbridge support for L3 agent * Fix exception message for bulk create failure * quantum l3 + floating IP support * Add missing conversion specifiers in exception messages * Use a common constant for the port/network 'status' value * Remove unused variable * Log message missing parameter causes exception * Update README for v2 API * Fix flavor extension based on new attribute extension spec * Update the Nicira NVP plugin to support the v2 Quantum API * Enhancements to Cisco v2 meta-plugin * Add model support for DHCP lease expiration * Trivial openvswitch plugin cleanup * Convert DHCP from polling to RPC * Add quota per-tenant * Reset device owner when port on agent is down * Allow extra config files in unit tests * Fix visual indentation for PEP8 conformance * Updates pip requirements * NEC OpenFlow plugin support * Enables Cisco NXOS to configure multiple ports Implements blueprint cisco-nxos-enables-multiple-ports * Implementation of second phase of provider extension * deal with parent\_id not in target * remove old gflags config code * convert query string according to attr map * Add device\_owner attribute to port * implementation for bug 1008180 * Fix bulk create operations and make them atomic * Make sure that there's a way of creating a subnet without a gateway * Update latest openstack files * improve test\_db\_plugin so it can be leveraged by extension tests * Adds the 'public 
network' concept to Quantum * RPC support for OVS Plugin and Agent * Initial implemention of MetaPlugin * Make dhcp agent configurable for namespace * Linux Agent improvements for L3 * In some cases device check causes an exception * normalize the json output of show a given extension * move the correct veth into the netns for the LB * linux bridge fixes following v1 code removal * fixes typo in ensure\_namespace * Remove v1 code from quantum-server * Add netns to support overlapping address ranges * dhcp-agent: Ryu plugin support for dhcp agent * fix missing deallocation of gateway ip * RPC support for Linux Bridge Plugin and Agent * Implementation of bp per-net-dhcp-enable * Enhance Base MAC validation * Use function registration for policy checks * Exempt openstack-common from pep8 check * Make 4th octet of mac\_range configurable * Replace openvswitch plugin's VlanMap with vlan\_ids DB table * Remove unused properties * Notification for network/subnet/port create/delete/update. blueprint quantum-notifications * Make the plugin for test\_db\_plugin configurable * update DHCP agent to work with linuxbridge plug-in * ryu/plugin, agent: unbreak 610017c460b85e1b7d11327d050972bb03fcc0c3 * Add classmethod decorator to class methods of providervlan ext * Only delete VLAN information after Quantum network is deleted * Make quantum pipeline configurable from quantum.conf * ovs\_quantum\_plugin should use reconnect\_interval in common conf * add name into port and subnet * Update openvswitch tunnel unittest * Enable agents and plugins to use the same configuration file * Fix linuxbridge agent tests * Update openstack-common files * Initial V2 implementation of provider extension * Implements data-driven views and extended attributes * Add v2 API support for the Cisco plugin Blueprint cisco-plugin-v2-api-support * Enhance V2 validations to work better for integers and booleans * Refactor the test cases so that all the test cases are under one test class * Add quota 
features into quantum. Blueprint quantum-api-quotas * Assume that subclass validates value of UUID * fix bug lp:1025526,update iniparser.py to accept empty value * Ensures policy file is reloaded only if updated * Provide way to specify id in models\_v2 * Add validity checks to Quantum v2 resources * Avoid removal of attributes used by policy engine * Raise proper exception if policy file do not exist * Introduce files from openstack common * Ensures API v2 router does not load plugin twice * ovs-agent exception non-existent ports * Ryu plugin support for v2 Quantum API * Add option sql\_max\_retries for database connection * Enable quantum agents to work with global cfg.CONF * Create DHCP agent tap device from port ID * Fix some syntax errors * fix bug lp:1019230,update rpc from openstack-common * Fix v2 API policy checks when keystone is in use * implement dhcp agent for quantum * Corrects imported modules in Cisco and Ryu according to latest nova packages * Validate that network\_id in port/subnet POST belong to the same tenant * Verify CIDR overlaps among networks' subnets * Address problems with foreign keys with subnet and network deletion * Add 'allocation\_pools' to Quantum v2 API subnets * Delete IP allocation range for subnet when deleting subnet * Fix linux bridge plugin to be consistent with naming rules * v2 support for the linux bridge plugin * OVS plugin support for v2 Quantum API * Check if interface exists in bridge prior to adding * Ensure that subnet\_id is on correct network * Use setuptools git plugin for file inclusion * Cisco's unplug\_iface refers to non existing exception * Implement IP address allocation * Enable user to configure base mac address * Bug #1012418 - quantum agent for OVS does not install properly on Xen XCP * Add simple file loggin to ovs\_quantum\_agent * Fixing pep8 warning messages Bug #1017805 * Network deletion and subnet creation bug fixes bug 1017395 * Remove paste configuration details to a seperate file. 
blueprint use-common-cfg * Bug 1015953 - linuxbridge\_quantum\_agent device\_exists() is buggy * Reorder imports by full module path * Added iptables\_manager ( based on openstack/linux\_net.py ) This module will be the base library to implement security groups and generic firewall. It is an independent iptables module, made to be easy to package if used by agents and also inside quantum * Unit test and Readme changes related to cisco plugin * Implements the blueprint use-common-cfg for the quantum service. More specifically uses global CONF for the quantum.conf file * Ensure unique mac address allocation. This is the first part of bug 1008029 * Add authZ through incorporation of policy checks * Fix additional pep8 issues on Jenkins bug 1014644 * removed "runthis" and other unused functions from utils.py * Linux bridge agents did not work with common linus utils bug 1014286 * Added vlan range management for OVS plugin * Bug #1013967 - Quantum is breaking on tests with pep 1.3 * Remove wrong base class for l2network\_models after v2.0 API * Cisco cli cannot find argument action\_prefix * Use openstack.common.exception * Remove unused functions in common/utils.py * API v2: mprove validation of post/put, rename few attributes * Bug #1000406 - Return value of shell commands is not checked by plugins * Fix python2.4 incompatibility * Add API v2 support * Binaries should report versions * Fix up test running to match jenkins expectation * Add build\_sphinx options * Remove unused imports * Quantum should use openstack.common.jsonutils * Remove hardcoded version for pep8 from tools/test-requires * AuthN support for Quantum * fix bug lp:1007557,remove unused functions in utils.py * Add common dir for shared agent code, add OVS lib * Bug #1007153 * Register enable\_tunneling as bool opt * Quantum should use openstack.common.importutils * PEP8 fixes * Bug #1002605 * Automatically determine Quantum version from source * Fix linux bridge section name Bug #1006684 * Remove the 
reference to non existing exception by linuxbridgeplugin * bug #1006281 * Parse linuxbridge plugins using openstack.common.cfg * Bug #1004584 * fix some pylint warnings * fix errors in database test cases * Log the exception so app loading issues can be debuged * remove unneeded import from OVS agent that break 2.4 compat * blueprint man-support and fix documentation build bug 995283 * Fix print error for linux bridge bindings bug 1001941 * Add HACKING.rst to tarball generation bug 1001220 * fall back to \`ip link\` when \`ip tuntap\` unavailable bug 989868 * Cisco plugin CLI call to quantumclient CLI * Calling Super method from QuantumPortAwareScheduler.\_\_init\_\_ * OVS plugin: add tunnel ips to central database * Include AUTHORS in release package * blueprint database-common bug 995438 * bug 996163 * Bug #994758 * Change Resource.\_\_call\_\_() to not leak internal errors * Let OVSQuantumTunnelAgent sync with database * Cleaned up log usage * blueprint agent-db-ha bug 985470 bug 985646 * Update codebase for HACKING compliance * Make sample quantum.conf compliant with docs * Make ovs Interface option set properly * Removed simplejson from pip-requires * Remove dependency on python-quantumclient * Add sphinx to the test build deps * Add HACKING.rst coding style doc * return 404 for invalid api version request * fix issue with OVS plugin VLAN allocation after a quantum-server restart * bug 963152: add a few missing files to sdist tarball * API docs: fix typo for network delete * Open Folsom * Bug #956559 VIF driver and scheduler for UCS plugin are broken since the flag configuration mechanism in nova is changed. 
Fixing that and also fixing some property names, along changes to how the quantum client code is invoked * plugin/ryu/agent: unbreak a06b316cb47369ef4a2c522f5240fa3f7f529135 * Fix path to python-quantumclient * Split out pip requires and aligned tox file * ryu/nova: catch up d1888a3359345acffd8d0845c137eefd88072112 * Add root\_helper to quantum agents * Fix missing files in sdist package [bug 954906] * Fix for bug 921743 Response codes for create ops in API v1.0 not compliant with spec * bug 954538 Fix for the cisco unit tests * check connection in Listener. refer to Bug #943031 * fixed incorrect duplicate title * Fixed incorrect title for example 3.10 * Downgraded required version of WebOb to 1.0.8 * Bug #949261 Removing nova drivers for Linux Bridge Plugin * Remove outdated content from OVS plugin README, point to website instead * add git commit date / sha1 to sphinx html docs * more files missing in sdist tarball * make sure pip-requires is included in setup.py sdist * Introducing the tenant owenrship checks in the Cisco plugin, changes are almost identical to those in Bug#942713 * Fix some plugins that don't check that nets + ports are owned by tenant * remove pep8 and strict lxml version from setup.py * plugin: introduce ryu plugin * bug 934459: pip no longer supports -E * Fix bug 940732 stack.sh can't match sql\_connection string * Return appropriate error for invalid-port state in create port API * blueprint quantum-ovs-tunnel-agent * Initial commit: nvp plugin * unittests: setup FLAGS.state\_path properly: bug 938637 * Cleanup the source distribution * Fix ovs config file location * blueprint quantum-linux-bridge-plugin * Remove quantum CLI console script * Bug 925372: remove deprecated webob attributes (and also specify stable webob version in pip-requires) * bug 923510: avoid querying all ports for non-detail GET Network call * Make tox config work * Pin versions to standard versions * bp/api-filters This changeset implements filters for core Quantum API 
and provides unit tests * Split out quantum.client and quantum.common * Quantum was missing depend on lxml * bp/api-error-codes Restructured API error codes for Quantum API v1.1 This changeset provides the following changes: - Only standard HTTP errors for Quantum API v1.1 - Customized fault response body formatting according to API version - Changes to unit tests to deal with version specific status codes * blueprint ovs-portstats * Add support for dealing with 501 errors (notimplemented) * Improved VlanMap * moving batch config out of quantum-server repo * bug 920299: remove duplicate + outdate README * Getting ready for the client split * Removed erroneous print from setup.py * Fixes setup scripts for quantum plugins * Base version.py on glance * fix mysql port in sql\_connection example.. * Make the quantum top-level a namespace package * Add \_\_init\_\_.py from plugin to be copied on setup scripts * Fix lp bug 897882 * PEP8 quantum cleanup * Install a good version of pip in the venv * Rename .quantum-venv to .venv * Updating Cisco README with instructions on installing the patched ncclient library * Remove plugin pip-requires * blueprint refactor-readme-to-manual * Bug #890028 * Implementation of the BP services-insertion-wrapper inside the Cisco Plugin * blueprint operational-status-ovs-plugin * bug 903580: remove invalid extensions path from quantum.conf * Fix for bug 902175 * Readme Fix * blueprint api-framework-essex * Fix for bug 900277 * Fix for bug 900316 * Modified the Readme for Unit Test Execution Instructions * Bug 900093 Remove unused function in db/api.py * bug #891246: Fix paths in agent Makefile * Second round of packaging changes * Bug 891705 Fix to change reference to the Quantum CLI from within the Cisco extensions' CLI module * Correcting the plugins classpath in the Quantum README * The relative path for the "ucs\_inventory.ini" file has been fixed * bug #891267 : for XS, grab iface-id from XAPI directly if needed * Changes to make 
pip-based tests work with jenkins * Fix for bug 890498 * Fix for bug 888811 * Fixing find\_config\_file after packaging changes * Added timeout flag to ovs-vsctl to avoid infinte waiting * Add quantum.exceptions path to configed ext paths * Fix for Bug #888820 - pip-requires file support for plugins * Fixing Cisco plugin after update\_\* change * Fix for bug 888207 * Fix for bug 877525 * Bug #875995: Quantum README fixes * Change version numbers to be compatible with debian packaging * Make the openvswitch plugin tests work again * Swich over to update\_{net,port} instead of rename\_net and set\_port\_state * Added try import to quantum-server and quantum-cli * Bug 887706 * Blueprint authentication-for-quantum * blueprint quantum-packaging * Moved the initialization of the blade state so that the interfaces which are configured outside of Quantum are also initialized in the blade state * fix minor double-serialization bug in client.py * bug #863635: remove vestigial cheetah import from bin/cli * Change the ovs plugin create\_\*() calls to take the kwargs param * Changing the log messages in order to be always identified by their sub-packages of origin, and they can even be filtered on that basis * Add .gitreview config file for gerrit * New tests are being adding to the Diablo code (Cisco L2-Network plugin), and some fixes in the case where the tests were failing * Add the ability to specify multiple extension directories * Add code-coverage support to run\_tests.sh (lp860160) * Change port/net create calls to take an additional kwargs param * ovs plugin: Remove reference to set\_external\_ids.sh * fix pep8 issues in Cisco plugin * Remove hack for figuring out the vif interface identifier (lp859864) 2011.3 ------ * Update openvswitch plugin README * Update openvswitch plugin README * Get output from run\_tests * Add rfc.sh to help with gerrit workflow * merge tyler's unit tests for cisco plugin changes lp845140 * merge salv's no-cheetah CLI branch lp 842190 * 
Addressing Dan's comment on output generator * merge sumit's branch for lp837752 * merge salv's branch for bug834013 * merge salv's branch for keystone token on client bug838006 * merge rohit's db test branch: lp838318 * merge salv fix for bug 841982, fix minor pep8 violation * merge salv fix for bug834008 * Changes to address Salvatore's review comments, removed unnecessary imports, and changed a debug message * changing key names to confirm to api specs * Merging latest from lp:quantum * Merging lo:~salvatore-orlando/quantum/quantum-api-auth * Implementing Dan's suggestion concerning fixing the bug in db api rather than FakePlugin * Fixing bad indent * syncing diverged branches * merging from lp:quantum * merging from lp:quantum * Updating CLI for not using Cheetah anymore. Now using a mechanism based on Python built-in templates * Fixing the bug in FakePlugin * made general exception handling messages consistent removed LOG pylint errors cleanup in tests * Create operation now generate response with status code 202 * restoring correct default pipeline * Mergin from lp:quantum * Add information about quantum dependency for nova * merge salv's branch to remove dummy plugin * Changing communication between UCSM driver to UCSM to HTTPS * Adding CLI usage examlpes to the README * Adding client-side support for Keystone integration * Keystone-integrated pipeline should not be default in quantum.conf * Removing class DUmmyDataPlugin * Removed redundant configuration, and added more comments in the configuration files * Updating the README file * Merging Shweta's test cases for mutliport resource * Adding Multinic tests * Typo fix in README * Merging Sumit's changes including fixes for multinic support, and CLI module for working with extensions * More fixes for multi-nic support * Fixed a bug with plug\_interface * Merging from Cisco branch * Changes to incorporate earlier review comments, also for multiport resource * adding quantum database unit test cases * Merging 
changes from Ying's branch (new mutliport resource) * add multiport and exception handling * add multiport resource * Merging from lp:quantum * Avoiding deserializing body multiple times with several parameters * merge cisco consolidated plugin changes * Test on param\_value changes as follows: * Merging lp:~salvatore-orlando/quantum/bug834449 * Merging Ying's changes (minor) * fix print statements in novatenant and portprofile * merge trunk * Minor refactoring * Changes to l2network\_plugin for create\_ports and pylint fixes to cli.py * Modified CLI to handle both core and extensions CLI * merge trunk * lp835216 client lib was not passing in kwargs when creating exceptions * lp834694 fix integrity error when deleting network with unattached ports. Add unit test * Minor fix in delete\_port * merging changes from cisco consolidated branch * Fixes to support multinic * Merging fixes from Sumit's branch for extension API version number and to UCS inventory to associated VIF-ID with ports * Merging from the Cisco branch * adding new api methods using just port\_id * Fixing the extensions URL to 1.0 and pep8 error * bug fixes to handle multinic * Merging Shweta's fix for extensions' test cases (clean up was not happening completely) * Adding Network and Port clean up functions for portprofile unit tests * Merging from lp:quantum * Merging Shweta's fixes in the tests for key names changes in the Core API * make CLI show\_port command display interface-id, add additional test case * merge salvatore's new cli code * Dictionary key values changes in test\_extension * Merging lp:quantum, resolving conflict * merge two pep8 branch * Merging Ying's pep8 fixes * fix pep8 issues * Merging quantum trunk * fix pep8 warnings * Updating common/extensions.py in order not to instantiate a QuantumManager when retrieving plugin * Cleaning pep8 * Merging lp:~danwent/quantum/lp834491 Fixing Bug #834491: api alignment merge broke ovs plugin (Critical) * Addressing comments from Dan * 
Merging from quantum * merge cisco extensions branch * lp834491: change plugin to work with API code after the API alignment merge * Merging Shweta's fixes to the test cases for the extensions * Added Extension & ucs driver test changes and fixes * Merging from Sumit's branch, changes to VIF-driver and Scheduler; extension action names have been changed in response to Salvatore's review comments in the extensions branch review * Syncing with Cisco extensions branch * Merging changes from Sumit's branch * Changes qos description to string; changes extension API names for get\_host and get\_instance\_port * Mergin Ying's branch * change get\_host and get\_instance\_port function name * Cleaning (removing) unused code..hooray ! fixes for extension tests * Sorting correctly all imports for the Nexus Driver and Unit Test * Fixed the Unit Test for Nexus Driver * add cisco\_faults under l2network package * move faults/exceptions to l2network package, remove unecessary faults definitions change the portprofile action api's method fix imports order and other comments issues * Merging from Sumit's branch, import ordering related changes * Changing the order of imports (to satisfy convention) * Merging the Cisco branch * Updating README according to Somik's comment * Finishing cli work Fixing bug with XML deserialization * Completing Unit Tests * Merging lp:~salvatore-orlando/quantum/quantum-api-alignment * Configuration of multiple VLANs on the same Nexus Switch Interfaces * Adding unit test for rename\_network * Added logging to syslog or file specified at command line removed plugin direct mode fixed unit tests to reflect changes in cli code fixex pep8 errors * Merging from Sumit's branch * Fixed some bugs with credential and qos resources; also fixed l2network\_single\_blade * Merging Rohit's changes * helper function to get creds based on name * integration with l2network\_plugin.py * fixing relative import in nexus\_db.py * putting in db support for creds and qos * 
merge latest quantum branch and resolve conflicts * Merging lp:~asomya/quantum/lp833163 Fix for Bug #833163: Pep8 violations in recent packaging changes that were merged into trunk (Critical) * Addressing Somik's comment * Templated output for CLI completed! * PEP8 fixes for setup.py * delete quantum/common/test\_lib.py to prepare for quantum merge * Made changes according to reviewer's comments. Add addtional information on extension test in README * Merging changes from Sumit's branch * Merging lp:~cisco-openstack/quantum/802dot1qbh-vifdriver-scheduler * Merging lp:~cisco-openstack/quantum/l2network-plugin-persistence * Fixed a bug in the initialization of the UCS inventory; fixed another bug in deleting a port * Noticed some pep8 errors, fixed them * Merging lp:quantum * Changes to incorporate reviwer's comments. Also changed client.py to handle extension URLs * Review Changes * remove unnecessary code and sync faults and exception handling * Code changed base on Reviews pep8 passed pylint 9.10 * merging with lp:quantum * merging from lp:quantum * Fixes based on review comments * Addressing comments from Ziad and Somik * merge lp:~bgh/quantum/lp837174 * Fix unit test printing (lp837174) * Fixing issue in view builders concerning attachment identifiers * Code clean up as per reviewr's request; documentation strings, unused code, etc * Rewording of the README file to clarify the use of the SSh port * clean up code and fix some comments * clean code and fix some comments * Merging from Sumit's latest branch - Fixed loading of Nexus DB tables; moved imports to l2nework\_db.py; Refactoring of code to generalize inventory handling (enhancement) * Fixed loading of Nexus DB tables; moved imports to l2nework\_db.py, changes discussed & approved by Rohit * Making Keystone version configurable * Accidentally took quantum.conf out of branch. 
Now back in * Merging lp:~raxnetworking/quantum/bug827272 * Merging branch: lp:~danwent/quantum/test-refactor * Removing "excess" file * Missed adding a file earlier, fixed a small issue * Refactoring of code to generalize inventory handling (enhancement) * Merging UCS inventory state initialization fix from Sumit's branch * Fixes an issue with loading the UCS inventory when a dynamic nic has been used outside of Quantum * Removed obsolete instructions from README * Changes to reflect the new features (mutli-blade, multi-chassis support) * Changes to support calls from VIF Driver and Scheduler * Pep8, pylint fixes * fixing pep8 error * adding helper function for port binding model * UCS inventore persistence and pep8/pylint fixes * UCS persistence fixes * added new columns to models for ucs plugin multi blade support updated methods in ucs\_db for newly added columns changed column dynamic\_vnic\_id in port binding table to blade\_intf\_dn updated tests to handle new column name * Merging rohit's UCS persistence support * UCS plugin persistence * Persistence support for UCS plugin network * adding utility functions to create dictionaries * Merging changes from Rohit's branch * Merging changes from cisco extensions * added ucs plugin related execptions in cisco\_exceptions.py added ucs plugin persistence related modules - ucs\_models.py and ucs\_db.py added ucs db related unit tests in test\_database.py fixed formatting in l2network\_models.py and test\_database.py * Adding some error checks * Reduced excessive logging * Several fixes to initial version * fixing the the test\_database.py tests * pylint and pep8 fixes * Change profile-id * merged Shweta's branch for ext test. 
Minor fix for review comments * Review Changes * merged Shweta's ext test branch * Initial commit with lots of changes * Moved the conf file uncer the cisco directory * Moved the conf file uncer the cisco directory * Updated conf file * Adding Entension API unt tests * Syncing with lp:quantum * Code refactored, made changes are per reviwer's suggestions * sync up with l2network exception handling for extension * merged Cisco branch's latest changes * Adding changes from Sumit's latest merge * merge with lp:~cisco-openstack/quantum/l2network-plugin-extensions * replace exception handler by using cisco\_exceptions * Raising exceptions in extension resources handling (where missing). Changing exception name to QosNotFound * Changing exception name to QosNotFound * Mergin from Cisco branch * Raising exceptions in extension resources handling (where missing) * Merging fixes to client side exception handling. Thanks lp:tylesmit ! * Merging fixes and changes batch-config script. Thanks lp:danwent ! * Adding the Nexus support to the Persistence Framwork Modification of the Nexus Unit Case to be running with Persistence Framework pep8 passed pylint 8.81/10 * added nexus exception in cisco\_exceptions.py added log to methods in l2network\_db.py added nexus\_db.py and nexus\_models.py - persistence modules for nexus plugin * add plugins.ini back * add all conf/\*.ini back * merge with ying's branch * merging with Ying's extension branch * remove ying's test ciscoplugin * remove all configuration files * remove cisco\_demo and test\_scripts directory, which were used by our local tests * Removed concatenation per review comments * change the configuration files to the default values * pylint and pep8 fix * merging with ~cisco-openstack/quantum/l2network-plugin-extensions * fix pylint issuses * Making keystone integration optional in quantum configuration * Merging bug fix for Bug 821733. Thanks lp:salvatore-orlando ! 
* Fixing typo * Making the client raise the appropriate exception if needed. Also increasing the pylint score to above 8 * pep8 error fixed for l2network\_db.py * Mering Sumit's branch with plugin support for Credentials, QoS, NovaTenant resources. Also merging latest from lp:~cisco-openstack/quantum/l2network-plugin-persistence * Merging from Sumit's branch, VIF-driver and Quantum-aware scheduler * Removed extra spaces to satisfy pep8 * VIF driver for 802.1qbh and Quantum aware scheduler * fix some pylint issues * Pylint and pep8 fixes * Changes to support credentials, qos, and novatenant extensions * Removing unused error response codes * Merging lp:~asomya/quantum/lp824145 Fix for Bug#824145 : Adding a setup script for quantum * merge trunk pep8 fixes adapting CLI to API v1.0 Fixing wsgi to avoid failure with extensions * Fixed indentation and changed file comments * add extension change to ying's branch * merge trunk * Pulling in changes from lp:quantum * Merging Cisco's contribution to Quantum. Thanks to various folks at Cisco Systems, Quantum will have plugins to integrate with Cisco UCS blade servers using 802.1Qbh, Cisco Nexus family of switches and the ability for Quantum plugin to have multiple switches/devices within a single Quantum plugin * Merging Shweta's change to fix a function call in the test code * Adding the changed UCS Driver function names in test\_ucs\_driver * Santhosh/Deepak | Fixed an issue where collection actions for PUT and DELETE methods in resource extension were routing to update and delete action of the resource * Merging from Sumit's branch pylint fixes and incorporating review comments * Changes to README file and merging Shweta's changes * Mergin Shweta's test changes, also README file * Changes to test structure. Adding pylint correctons * Fixes to the README file per earlier review comments. 
Also removed main from one of the modules * Mergin from cisco brach * Merging from lp:quantum * Pulling changes from Cisco branch * Pylint fixes * exit unit tests if tests are invoked specifying a particular test * Merging Nexus pylint changes and other enhancements from Edgar * pep8 passed pylint 8.83 * Merging Rohit's changes * Partial commit * Moved test\_database.py to plugins/cisco/tests/unit/ Edited test\_database.py to be able to run like other tests pylint for cisco/db folder - 8.85/10 pylint for cisco/tests/unit/test\_database.py - 8.42/10 pep8 done * Adding a new file with all the XML snippets to make code easier to read Moving the Nexus SSH server port to the configuration file Removing main functions Making some changes based on Dan and Salvatore reviews * Changes in the README file to incorporate Somik's comments * pylint changes - pylint score for cisco/db folder - 8.27/10 pep8 checks done * Removing extra testing function on Nexus Driver * Merging plugin and tests' changes * Fixes to the tests which were breaking, including fixes to the test cases * Pulling in changes from Rohit's branch * Pulling in changes from Shweta's branch * Removed main from modules as per review comments * updated README file to include persistence framework setup instructions updated db api.py unset\_attachment method to return port moved db\_conn.ini into cisco/conf/ with other configuration files updated l2network\_plugin\_configuration.py to get db config cleaned up l2network\_db.py - removed config parser code as using cisco config parser updated l2network\_db.py to raise specific exceptions in error cases updated create\_vlanid method in l2network\_db.py to not raise exception if vlan rows exist updated portprofile and portprofile\_binding methods to include tenant\_id as an argument added cisco/db/test\_database.py containing unit tests for quantum and l2network\_plugin tables edited get\_pp\_binding method in l2network\_db.py to return empty list when no results found 
pep8 checks done * Adding Persistence unit test * Fixed bugs while testing * pep8 errors fixed * Merging rohit's changes * Changes to support persistence framework * Merging: lp:~danwent/quantum/client-lib * Merging: lp:~tylesmit/quantum/api-client-fix-serialization Adding automattic serialization to all requests by moving it to do\_request * First, trivial, implementation of authN+authZ * fixes from rohit's branch * from rohit's branch * Adding more templates More tests * - Added new tables VlanID to generate ids and maintain usage of vlans - Added wrapper functions to get next unused vlan, populate vlans, release vlans, getall vlans, isused van and delete van - Added ported instead of networked for portprofile binding table - Changed wrapper methods and test cases for portprofile binding to use portid * Adding missing files to branch * Simplifying condition * FIxing missing 'output' variable @ line 243 (syntax error) * Adding automattic serialization to all requests by moving it to do\_request * added network and port models similar to quantum with following changes - - InnoDB as storage engine to allow foreign key constraints - joinedLoad operation on the queries to make use of relation between Network and Port Moved out the network and port code to make l2network contain vlanbinding, portprofile and portprofile bindings * Authentication with Keystone. auth\_token Middleware tweaked and imported in Quantum tree Developing Authorization middleware * Introducting cheetah Updating list\_nets in CLI Writing unit tests for list\_nets Stubbing out with FakeConnection now * I'm too tired * Stubout work in progress * Merging quantum extenions framework into trunk. Thanks rajaram vinkesh, deepak & santhosh for the great work! 
* - added network and port models into the l2network plugin instead of using quantum models - added api methods for network and ports - restructured code to use the l2network network and port - added l2network base class for other tables to inherit - added support for l2network plugin model objects to behave like dictionary (gets rid of code to convert objects into dictionaries) - added foreign key constraints to l2network plugin model attributes representing columns - added attributes to represent relation between models in l2network plugin - added joinedload only to network and port (need to to for others) - added InnoDB as the storage medium in base table for imposing foreign keys - updated l2network test cases to handle foreign key constraints * lp Bug#824145 : Adding a setup script for quantum * skeleton for cli unit tests * merge trunk * Removing exceptions as well (previously only API faults were removed) * Merged quantum trunk * adding renamed client-lib tests * Tiny change to the README file, instructions on how to get ncclient * - Adding setup script * Adding db connection and l2network plugin database modules * update CLI to use show instead of list for calls that do not return a list * rename client\_lib unit tests so it is run by ./run\_tests.sh, update tests to handle name changes * force batch\_config.py to use json, as XML has issues (see bug: 798262) * update batch\_config.py to use new client lib, hooray for deleting code * Changed to default plugin class name * Rajaram/Vinkesh | Added examples of scoping extension alias in request and action extension * Added tests directory to list of modules in the README file * Added "tests" directory to the list modules in the README file * Adding the required build for Nexus support * Merging changes addressing Bug # 802772. Thanks lp:danwent ! 
* Merging bugfix for Bug 822890 - Added License file for Quantum code distribution * Fixed typo in README * README file updates (pointer to Nova Cactus branch), and numerous other edits based on Mark's template * L2 Network Plugin Framework merge * Incorporated changes in response to review comments from Ram * Adding Apache Version 2.0 license file. This is the official license agreement under which Quantum code is available to the Open Source community * Making a check for the presence of UCS/Nexus plugin (earlier it was not in certain cases). With this change, if the UCS/Nexus plugins are not enabled, the core API tests can be run even on Ubuntu (and RHEL without the requirement of any specific network hardware) * Merging test cases from Shwetas' branch, and further modified README file * Merging the test framework from Shweta's branch * decluttering \_parse\_request\_params method for QuantumController * Fixing detail action for port collection Adding PortIsDown exception Adding unit tests for detail actions and PortIsDown PEP8 FIXES * Adding Unit Test Cases Now * Adding Cisco Unit Tests * minor enhancements to quantum client-lib * RHEL limitation updated * Adding support for expressing format through Content-Type header Adding action detail for port resource (Member & Collection) * Changes to enhance L2 network plugin framework * undo unintentional formatting change in run\_tests.sh * remove unneeded \_\_init\_\_ * refactoring testing code to support plugin tests * Added QuantunPluginBase as the base class for the l2network\_plugin * Generalized and put placeholders * another merge * pep8 cleanup, restore defaults * Added info about ssh conf required for nexus switch * merge * remove unneeded tests from ovs\_quantum\_plugin * Nexus plugin classpath was incorrect, fixed it * Edits to reflect conf changes, made it easier to follow * merge heckj's pip-requires fixes * Fixed issue with creating new port profiles (one configuration parameter got left out during the 
migration to the new configuration scheme). Also fixed a bug in the calculation of the profile id * Fixes the broken call to second level of plugins. Renaming will work now * updates to pip-requires for CI * Loading of device-specific plugins and drivers is done dynamically by setting configuration. All configuration is driven through configuration files place in the conf directory. Each .ini conf file contains info on the configuration. README file updated to reflect all the changes. Fixed issue with delete\_network deleting the network even when attachments were present. Fixed issue with port id generation * Deepak/Vinkesh | Fixed show action in extension controller to return 404, added example to include namespace in a request extension * Merged quantum trunk * Santhosh/Vinkesh | Added extension\_stubs file * Removing extra file in Nexus Driver * Removing extra file in Nexus Driver * Relabelling API version to 1.0! * Cosmetic changes to unit tests for client library. Pep8 fixes * Removed quantum/plugins/cisco/db/ and quantum/cisco\_extensions since these will be merged separately * Fixed pep8 error * Merging changes * Merging changes from lp:quantum * Fixed an issue selecting the right port interface and also properly switching off the Nexus Interface * Completing API spec alignment Unit tests aligned with changes in the API spec * Applying fix for bug #814518 Merging from lp:~salvatore-orlando/quantum/bug814518 * Adding controller and view builder for attachment resource * Merging the port profile client name fix * Earlier fix resulted in a different issue (profile client name, was also being used as profile name, hence breaking) * Truncated the port profile client name length to 16 characters (ucsm excepts max 17 chars) * Mergin fix for Bug 818321 * Merging approved OVS plugin configuration change branch. Thanks lp:danwent ! 
* Merging the brand new Quantum-client-library feature * Requests now send the Content-Type in the HTTP request * fix broken flush in db.network\_destroy, pep8 fixes * req/res alignment complete. Status code alignment ALMOST complete (need to sort out 200 vs 202 for create ops) * Vinkesh | Changed import orders according to pep8 recommendations * Including a flag to activate the NX-OS driver Updating the README documentation * merging branch for bug802772, which this branch is stacked on top of * WIP. Still need to align APIs for interface plug/unplug * Fixing pep8 errors * Adding the Nexus OS driver based on the new PlugIn structure * fix incorrect handling of duplicate network name, add exception for duplicate network name, and add unit test to confirm detection * WIP * Merging lp:quantum updates * Fixing syntax issue. I had a 2.7+ style dict comprehension, so I made it 2.6 friendly * Removing a debugging line * pep8 fix * Fixing API behaviour for throwing 400 error on invalid body. Adding unit test for creating a port without request body * make ovs plugin pay attention to port state * persistence of l2network & ucs plugins using mysql - db\_conn.ini - configuration details of making a connection to the database - db\_test\_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network\_db.py - db methods for l2network models - l2network\_models.py - class definitions for the l2 network tables - ucs\_db.py - db methods for ucs models - ucs\_models.py - class definition for the ucs tables dynamic loading of the 2nd layer plugin db's based on passed arguments Create, Delete, Get, Getall, Update database methods at - Quantum, L2Network and Ucs Unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins pep8 checks done branch based off revision 34 plugin-framework * Vinkesh/Santhosh | Moved the stub classes in test\_extensions to a separate file extension\_stubs * 
Merged from trunk * bug802772 update exception handling in OVS plugin to use API exceptions * merged the latest changes from plugin-framework branch - revision 39 conforming to the new cisco plugin directory structure and moving all db related modules into cisco/db folder updated db\_test\_plugin.py - added import of cisco constants module - added LOG.getLogger for logging component name - updated import module paths for l2network\_models/db and ucs\_models/db to use the new directory structure - updated (rearranged) imports section to obey openstack alphabetical placement convention updated db\_conn.ini - updated database name from cisco\_naas to quantum\_l2network unit test cases ran successfully and pep8 checks done again * removing a few additional lines that aren't needed once we don't calculate port count * Adding a tests directory, this can be used for plugin-specific test cases * also remove line that computes portcount, as it is unneeded now that we don't return it * Including copyright info * merge branch for to fix bug817826 * For the modules to get added, missed in the earlier checkin * remove PortCount attribute of network object, as it is not in the spec and was causing us to hit bug 818321 (note: this commit does not fix the underlyingproblem with xml deserialization, it just makes sure we don't hit it with the existing API code) * Changed the directory structure to a more organized one. 
Fixed the imports to reflect the new structure * Merging the latest changes from lp:quantum * change default integration bridge from br100 to br-int to reflect new default for OVS vif-plugging in nova Diablo-3 release * fix bug 817826 and similar error in batch\_config.py * persistence of l2network & ucs plugins using mysql - db\_conn.ini - configuration details of making a connection to the database - db\_test\_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network\_db.py - db methods for l2network models - l2network\_models.py - class definitions for the l2 network tables - ucs\_db.py - db methods for ucs models - ucs\_models.py - class definition for the ucs tables dynamic loading of the 2nd layer plugin db's based on passed arguments Create, Delete, Get, Getall, Update database methods at - Quantum, L2Network and Ucs Unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins pep8 checks done branch based off revision 34 plugin-framework * merge Salvatore's api branch with fixes for tests. Tweaking branch to remove unwanted bin/quantum.py as part of merge * Merging in main repo updates * Updating to fix some SSL issues * Removing extra quantum.py file from source control removing unused import from quantum/api/\_\_init\_\_.py * Apply fix for bug #817813 Merging lp:~danwent/quantum/bug817813 * Apply fix for bug #814012 Merging lp:~danwent/quantum/bug814012 * Apply fix for bug #814517 merging lp:~tylesmit/quantum/quantum-bug-814517 * bug 817813: default provider in plugins.ini accidentally changed. 
Changing it back to FakePlugin * Changed the param name "network-name" to "net-name" since the Quantum service expects the later * Removing some legacy code from the unit tests * Adding unit tests to cover the client library * Changing the CLI to use the new client library * Adding refactored API Client * pep8 fixes * fix bug 814012, add unit tests for it * Resolving Bug 814517 which caused XML to have extra whitespace * Vinkesh/Santhosh | Removed loading extensions from 'contrib' and fixed an indentation bug while loading extensions * Santhosh/Rajaram|modified extensions section in README * Rajaram/Santhosh | Added logging to the PluginAwareExtensionManager failures * Rajaram/Santhosh|Added plugin interface in foxinsox and Updated README * Rajaram/Santhosh|quantum manager loads plugin only once, even though both extension middleware and APIRouter calls it * Santhosh/Rajaram|latest merge from quantum and made extensions use options to load plugin * Apply fix for bug #797419 merging lp:~salvatore-orlando/quantum/bug797419 * Re-fixing issues with XML deserialization (changes got lost in merges with trunk) Adapting assertions in unit tests merged from trunk to reflect changes in the API due to RFE requested by Erik Carlin * Rajaram/Vinkesh | Plugins advertise which extensions it supports * Merging branch lp:~salvatore-orlando/quantum/bug802892 Fixing bug #802892 * Merging branch lp:~netstack/quantum/quantum-unit-tests * Fixing silly pep8 error * doh * Restoring quantum\_plugin\_base to previous state. 
Will discuss in the future whether allow API layer to pass options to plugins upon initialization * Vinkesh/Santhosh | Added tests to check the member and collection custom actions of ResourceExtensions * Vinkesh/Deepak | Moved plugin related checks in ExtensionManager code to PluginAwareExtensionManager * Deepak/Vinkesh | Added an base abstract class which can be inherited by PluginInterface class which defines the contract expected by extension * Vinkesh/Deepak| Added doc and small refactoring * Unit tests for API completed fixed pep8 errors * Add TESTING document: description and polices for quantum tests * Adding more unit tests * Deepak/Santhosh | ExtensionManager verifies that plugin implements the interface expected by the extension * Santhosh/Deepak | Made supports\_extension method optional for plugin, plugin will be loaded only once * Merged from quantum trunk * Santhosh/deepak| Load extensions supported by plugin * add extension code in.(last push does not include this directory.) * add api extensions (including portprofiles resources and associate/disassociate actions.) * Changes to support port-profile extension. Fixed an error in the README file * Very initial version of the nxos driver .... lets call it ver 0.0.1! * Removing code related to functional tests * Porting shell script get-vif.sh to python module get-vif.py for cisco ucsm module * Required for recognizing the "cisco" package. 
Missed in the initial checkin * Applying fix for bug #804237 from branch lp:~salvatore-orlando/quantum/bug804237 * minor pep8 fix * Changed some credentials (does not affect functionality) * This file is not required * Initial checkin for the L2-Network Plugin with all the associated modules and artifacts * Rajaram/Santosh|misc readablity improvements to extension tests * Santosh/Rajaram| added extenstion test to show header extensibility * Rajaram/Vinkesh | Added tests to confirm extensions can edit previously uneditable field * removing pep8 errors * Added more unit tests for API Starting work on functional tests, importing code from Glance * Now REALLY using in-memory db * Adapated plugin infrastructure to allow API to pass options to plugins Now using in-memory sqlite db for tests on FakePlugin teardown() now 'resets' the in-memory db Adding unit tests for APIs * Fixing error introduced in find\_config * Removing excess debug line * Fixing syntax errors in db/models.py * Temporary commit * Now loading plugin before setting up routes. Passing same plugin instance to API controllers * Adding unit test Applying pep8 fixes * Starting implementation of unit tests Fixing minor bugs with FakePlugin * Removing static data for FakePlugin * - Unit tests will use FakePlugin - FakePlugin adapted to db API with sqlite - db Models updated to inherit from generic Quantum Base model (provides utility functions and capabilities for treating db objects as dicts - see nova.db.models.NovaBase) - functional tests commented out temporarily. Will un-comment when code for starting actual service is in place * Adding Routes>=1.12.3 to tools/pip-requires * Work in progress - just starting * ...and again! * removing "quantum" folder as well from etc * removing api-paste.ini * Addressing comments from Somik * Merging dan wendlandt's bugfixes for Bug #800466 and improvements that enable Quantum to seamlessly run on KVM! 
* fix pep8 introduced by trunk merge * A small start on unit tests: mostly a proof of concept that contains a test for api/ports.py * Added some more plugin agnostic tests (attachment and negative tests) and some pep8 fixes * merge * more pep8 goodness * Fixing bug #798262 * refactor batch\_config, allow multiple attaches with the empty string * Merge: bzr merge lp:~bgh/quantum/bugfixes * Fix cut and paste error in api\_unplug\_iface * Fixing bug #798261 * no-commit * Santhosh/Vinkesh | Added extensions framework * merge and pep8 cleanup * Merging latest changes from parent repo - lp:network-service , Parent repo had approved merge proposal for merging lp:~santhom/network-service/quantum\_testing\_framework , which has now been merged into lp:network-service * Merging pep8 and functional test related changes lp:~santhom/network-service/quantum\_testing\_framework branch * add example to usage string for batch\_config.py * Bug fixes and clean-up, including supporting libvirt * Fix typo in mysql package check * Fix typo in mysql package check * Adding support for 'detail' action on networks objects * README fixes * Santhosh/Deepak | Fixed the import issue and config.load\_paste\_app issue * Santhosh/Vinkesh | Fixed all the pep8 violations. Modified the 'req' to 'request' across all the services and wsgi so that it's consistent with other projects * Santhosh/Vinkesh | Added the testing framework. 
Moved the smoketest to tests/functional * merged remote README changes * Fix cli.py from last merge when it got overwritten * Fixing pep8 errors removing excess debug lines * Add dependencies to README and fix whitespace * Fix merge indentation errors * Merged Brad's ovsplugin code * pep8 changes for quantum-framework code pieces * Update Quantum README file with instructions to launch the service and get going * Updated quantum\_plugin\_base with with return type dataformats as well as exceptions * Added a basic README file and updated Quantum plugin base class with appropriate exceptions * Initial commit of exceptions that are raised by a quantum plugin * Make the wording a little clearer * Remove -a option from examples (it no longer exists) * Make the API the default * Address Dan's review comments * Make the manager a little smarter about finding its config file * Fix another TODO: remove main function from manager * Fix detail\_net and list\_ports commands * Remove get\_all\_interfaces and fix detail\_network commands * Initial version of openvswitch plugin * \* Merged changes from Salvatore's branch - quantum-api-workinprogress \* Removed spurious methods from quantum\_base\_plugin class. \* Updated the sample plugins to be compliant with the new QuantumBase class * Update readme with quantum specific instructions * Address some of the remaining TODOs and general cleanup * Add headers * Initial cut of openvswitch plugin * Add database models/functions for ports and networks * Print the command list in the help * Whitespace fixes * Added api functions for the interface commands * Initial rework of cli to use the WS api * Copy over miniclient from testscripts and port tests.py to use unittest * Adding ports.py to source control * pep8 fixes (1st batch) * First working version of Quantum API * Adding views/networks.py to bzr * Adding serialization/deserilization for network resources. Adding fake plugin * networks api with final URL structure. 
No serialization yet * Implementing interface with plugin * adpating wsgi files * Work in progress on network API * Adding first files for quantum API * Minor fixes: indentation in bin/quantum and fix import in config.py * Adding api paste configuration file * Removing .pydevproject from version control * Branching from quantum-framework * Adding flags.py to infrastructure code * Move plugin configuration to plugins.ini - a config file * 1) Created a DummDataPlugin in SamplePlugin module * merged salvatore's changes to local branch * 1) Added a bare-bones framework for quantum plugins. 2) Created demo quantum plugin that conforms to QuantumPluginBase Abstract class specification. 3) Demonstrated plugin registration and invocation using the demo plugin called "QuantumEchoPlugin" 4) Created the initial file structure for a quantum CLI 5) Seeded the utils module that will contain frequently used Quantum utilities. 6) Modified the manager module to initialize and register the quantum plugin defined in a configuration file. I have hard-coded the path to plugin for now but this will move to a quantum.conf file * Fixing pep8 errors * adding /bzrignore to precent checking in pyc files and that sort of stuff.. * Pushing initial started code based on Glance project and infrstructure work done by the melange team * Merging in Shweta's fixes from the review by Sumit * Minor Fix in ucs tests * Fixing issues discussed in merge prop. The UCS Inventory clears the DB on teardown. The multiblade tests now check to see if a port exists in the db before deleting it. 
It checks to make sure the UCSInventory is set in the config * Adding UCS inventory tests * Merging in latest changes from lp:quantum * Merging in Shweta's test changes * Ading Ucs db tests * Removing excess imports * Fixing pep8 errors and pushing pylint score up to 8.57 * Fix for bug/893663 Making Cisco CLI usable from installed packages * Bug 903684: functions defined twice in utils.py * blueprint api-operational-status * Adds sqlalchemy support for ovs\_quantum\_plugin * bug 903581: remove etc/quantum.conf.sample as it is invalid * Fixing bug/903829 Making setup\_server.py not try to install quantum.conf.sample * Removing a couple extra lines * Adding some tests, fixing some bugs, and making the tearDown correctly remove PortProfiles * Adding author information * Removing a negative test until I can figure out how to implement it * Removing some negative tests until I can figure out how to implement them * Updating tests * Fixing port-related calls * Adding tests * Tweaking other multiblade tests * Updating multiblade create\_network test * Starting making multi\_blade model return data * Adding initial multi blade test file from Shubhangi vmware-nsx-12.0.1/.testr.conf0000666000175100017510000000036713244523345016031 0ustar zuulzuul00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./vmware_nsx/tests/unit} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list vmware-nsx-12.0.1/etc/0000775000175100017510000000000013244524600014501 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/etc/policy.json0000666000175100017510000001173213244523345016706 0ustar zuulzuul00000000000000{ "context_is_admin": "role:admin", "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", "context_is_advsvc": "role:advsvc", "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", "admin_only": "rule:context_is_admin", 
"regular_user": "", "shared": "field:networks:shared=True", "shared_firewalls": "field:firewalls:shared=True", "external": "field:networks:router:external=True", "default": "rule:admin_or_owner", "create_subnet": "rule:admin_or_network_owner", "get_subnet": "rule:admin_or_owner or rule:shared", "update_subnet": "rule:admin_or_network_owner", "delete_subnet": "rule:admin_or_network_owner", "create_network": "", "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc", "get_network:router:external": "rule:regular_user", "get_network:segments": "rule:admin_only", "get_network:provider:network_type": "rule:admin_only", "get_network:provider:physical_network": "rule:admin_only", "get_network:provider:segmentation_id": "rule:admin_only", "get_network:queue_id": "rule:admin_only", "create_network:shared": "rule:admin_only", "create_network:router:external": "rule:admin_only", "create_network:segments": "rule:admin_only", "create_network:provider:network_type": "rule:admin_only", "create_network:provider:physical_network": "rule:admin_only", "create_network:provider:segmentation_id": "rule:admin_only", "update_network": "rule:admin_or_owner", "update_network:segments": "rule:admin_only", "update_network:shared": "rule:admin_only", "update_network:provider:network_type": "rule:admin_only", "update_network:provider:physical_network": "rule:admin_only", "update_network:provider:segmentation_id": "rule:admin_only", "update_network:router:external": "rule:admin_only", "delete_network": "rule:admin_or_owner", "create_port": "", "create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:binding:host_id": "rule:admin_only", "create_port:binding:profile": "rule:admin_only", "create_port:mac_learning_enabled": 
"rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:provider_security_groups": "rule:admin_only", "get_port": "rule:admin_or_owner or rule:context_is_advsvc", "get_port:queue_id": "rule:admin_only", "get_port:binding:vif_type": "rule:admin_only", "get_port:binding:vif_details": "rule:admin_only", "get_port:binding:host_id": "rule:admin_only", "get_port:binding:profile": "rule:admin_only", "update_port": "rule:admin_or_owner or rule:context_is_advsvc", "update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc", "update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "update_port:binding:host_id": "rule:admin_only", "update_port:binding:profile": "rule:admin_only", "update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "update_port:provider_security_groups": "rule:admin_only", "delete_port": "rule:admin_or_owner or rule:context_is_advsvc", "create_qos_queue": "rule:admin_only", "get_qos_queue": "rule:admin_only", "update_agent": "rule:admin_only", "delete_agent": "rule:admin_only", "get_agent": "rule:admin_only", "create_dhcp-network": "rule:admin_only", "delete_dhcp-network": "rule:admin_only", "get_dhcp-networks": "rule:admin_only", "create_l3-router": "rule:admin_only", "delete_l3-router": "rule:admin_only", "get_l3-routers": "rule:admin_only", "get_dhcp-agents": "rule:admin_only", "get_l3-agents": "rule:admin_only", "get_loadbalancer-agent": "rule:admin_only", "get_loadbalancer-pools": "rule:admin_only", "create_floatingip": "rule:regular_user", "create_floatingip:floating_ip_address": "rule:admin_only", "update_floatingip": "rule:admin_or_owner", "delete_floatingip": "rule:admin_or_owner", "get_floatingip": "rule:admin_or_owner", "create_network_profile": "rule:admin_only", "update_network_profile": "rule:admin_only", "delete_network_profile": "rule:admin_only", "get_network_profiles": "", "get_network_profile": "", "update_policy_profiles": 
"rule:admin_only", "get_policy_profiles": "", "get_policy_profile": "", "create_metering_label": "rule:admin_only", "delete_metering_label": "rule:admin_only", "get_metering_label": "rule:admin_only", "create_metering_label_rule": "rule:admin_only", "delete_metering_label_rule": "rule:admin_only", "get_metering_label_rule": "rule:admin_only", "get_service_provider": "rule:regular_user", "get_lsn": "rule:admin_only", "create_lsn": "rule:admin_only", } vmware-nsx-12.0.1/etc/policy.d/0000775000175100017510000000000013244524600016222 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/etc/policy.d/neutron-fwaas.json0000666000175100017510000000354413244523345021723 0ustar zuulzuul00000000000000{ "shared_firewalls": "field:firewalls:shared=True", "shared_firewall_policies": "field:firewall_policies:shared=True", "shared_firewall_rules": "field:firewall_rules:shared=True", "create_firewall": "", "update_firewall": "rule:admin_or_owner", "delete_firewall": "rule:admin_or_owner", "create_firewall:shared": "rule:admin_only", "update_firewall:shared": "rule:admin_only", "delete_firewall:shared": "rule:admin_only", "get_firewall": "rule:admin_or_owner or rule:shared_firewalls", "shared_firewall_groups": "field:firewall_groups:shared=True", "shared_firewall_policies": "field:firewall_policies:shared=True", "shared_firewall_rules": "field:firewall_rules:shared=True", "create_firewall_group": "", "update_firewall_group": "rule:admin_or_owner", "delete_firewall_group": "rule:admin_or_owner", "create_firewall_group:shared": "rule:admin_only", "update_firewall_group:shared": "rule:admin_only", "delete_firewall_group:shared": "rule:admin_only", "get_firewall_group": "rule:admin_or_owner or rule:shared_firewall_groups", "create_firewall_policy": "", "update_firewall_policy": "rule:admin_or_owner", "delete_firewall_policy": "rule:admin_or_owner", "create_firewall_policy:shared": "rule:admin_only", "update_firewall_policy:shared": "rule:admin_only", "delete_firewall_policy:shared": 
"rule:admin_only", "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewall_policies", "create_firewall_rule": "", "update_firewall_rule": "rule:admin_or_owner", "delete_firewall_rule": "rule:admin_or_owner", "create_firewall_rule:shared": "rule:admin_only", "update_firewall_rule:shared": "rule:admin_only", "delete_firewall_rule:shared": "rule:admin_only", "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewall_rules" } vmware-nsx-12.0.1/etc/policy.d/flow-classifier.json0000666000175100017510000000030613244523345022214 0ustar zuulzuul00000000000000{ "create_flow_classifier": "rule:admin_only", "update_flow_classifier": "rule:admin_only", "delete_flow_classifier": "rule:admin_only", "get_flow_classifier": "rule:admin_only", } vmware-nsx-12.0.1/etc/policy.d/dynamic-routing.json0000666000175100017510000000107113244523345022234 0ustar zuulzuul00000000000000{ "get_bgp_speaker": "rule:admin_only", "create_bgp_speaker": "rule:admin_only", "update_bgp_speaker": "rule:admin_only", "delete_bgp_speaker": "rule:admin_only", "get_bgp_peer": "rule:admin_only", "create_bgp_peer": "rule:admin_only", "update_bgp_peer": "rule:admin_only", "delete_bgp_peer": "rule:admin_only", "add_bgp_peer": "rule:admin_only", "remove_bgp_peer": "rule:admin_only", "add_gateway_network": "rule:admin_only", "remove_gateway_network": "rule:admin_only", "get_advertised_routes":"rule:admin_only", } vmware-nsx-12.0.1/etc/policy.d/network-gateways.json0000666000175100017510000000063513244523345022443 0ustar zuulzuul00000000000000{ "create_network_gateway": "rule:admin_or_owner", "update_network_gateway": "rule:admin_or_owner", "delete_network_gateway": "rule:admin_or_owner", "connect_network": "rule:admin_or_owner", "disconnect_network": "rule:admin_or_owner", "create_gateway_device": "rule:admin_or_owner", "update_gateway_device": "rule:admin_or_owner", "delete_gateway_device": "rule:admin_or_owner" } 
vmware-nsx-12.0.1/etc/policy.d/security-groups.json0000666000175100017510000000052013244523345022305 0ustar zuulzuul00000000000000{ "create_security_group:logging": "rule:admin_only", "update_security_group:logging": "rule:admin_only", "get_security_group:logging": "rule:admin_only", "create_security_group:provider": "rule:admin_only", "create_security_group:policy": "rule:admin_only", "update_security_group:policy": "rule:admin_only", } vmware-nsx-12.0.1/etc/policy.d/routers.json0000666000175100017510000000152013244523345020625 0ustar zuulzuul00000000000000{ "create_router:distributed": "rule:admin_or_owner", "get_router:distributed": "rule:admin_or_owner", "update_router:distributed": "rule:admin_or_owner", "get_router:ha": "rule:admin_only", "create_router": "rule:regular_user", "create_router:external_gateway_info:enable_snat": "rule:admin_or_owner", "create_router:ha": "rule:admin_only", "get_router": "rule:admin_or_owner", "update_router:external_gateway_info:enable_snat": "rule:admin_or_owner", "update_router:ha": "rule:admin_only", "delete_router": "rule:admin_or_owner", "add_router_interface": "rule:admin_or_owner", "remove_router_interface": "rule:admin_or_owner", "create_router:external_gateway_info:external_fixed_ips": "rule:admin_only", "update_router:external_gateway_info:external_fixed_ips": "rule:admin_only", } vmware-nsx-12.0.1/etc/oslo-config-generator/0000775000175100017510000000000013244524600020704 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/etc/oslo-config-generator/nsx.ini0000666000175100017510000000011413244523345022220 0ustar zuulzuul00000000000000[DEFAULT] output_file = etc/nsx.ini.sample wrap_width = 79 namespace = nsx vmware-nsx-12.0.1/etc/README.txt0000666000175100017510000000046713244523345016215 0ustar zuulzuul00000000000000To generate the sample vmware-nsx configuration files, run the following command from the top level of the vmware-nsx directory: tox -e genconfig If a 'tox' environment is unavailable, then you can run the 
following script instead to generate the configuration files: ./tools/generate_config_file_samples.sh vmware-nsx-12.0.1/devstack/0000775000175100017510000000000013244524600015532 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/devstack/README.rst0000666000175100017510000000404113244523345017227 0ustar zuulzuul00000000000000======================== Devstack external plugin ======================== Add and set the following in your local.conf/localrc file: enable_plugin vmware-nsx https://git.openstack.org/openstack/vmware-nsx For Nsx-mh: ----------- Q_PLUGIN=vmware_nsx PUBLIC_BRIDGE # bridge used for external connectivity, typically br-ex NSX_GATEWAY_NETWORK_INTERFACE # interface used to communicate with the NSX Gateway NSX_GATEWAY_NETWORK_CIDR # CIDR to configure $PUBLIC_BRIDGE, e.g. 172.24.4.211/24 For Nsx-v: ---------- Q_PLUGIN=vmware_nsx_v NSXV_MANAGER_URI # URL for NSXv manager (e.g - https://management_ip). NSXV_USER # NSXv username. NSXV_PASSWORD # NSXv password. NSXV_CLUSTER_MOID # clusters ids containing OpenStack hosts. NSXV_DATACENTER_MOID # datacenter id for edge deployment. NSXV_RESOURCE_POOL_ID # resource-pool id for edge deployment. NSXV_AVAILABILITY_ZONES # alternative resource-pools/data stores ids/edge_ha for edge deployment NSXV_DATASTORE_ID # datastore id for edge deployment. NSXV_EXTERNAL_NETWORK # id of logic switch for physical network connectivity. NSXV_VDN_SCOPE_ID # network scope id for VXLAN virtual-wires. NSXV_DVS_ID # Dvs id for VLAN based networks. NSXV_BACKUP_POOL # backup edge pools management range, # :[edge_size]::. # edge_type:'service'(service edge) or 'vdr'(distributed edge). # edge_size: 'compact', 'large'(by default), 'xlarge' or 'quadlarge'. # To enable the metadata service, the following variables should be also set: NSXV_MGT_NET_PROXY_IPS # management network IP address for metadata proxy. NSXV_MGT_NET_PROXY_NETMASK # management network netmask for metadata proxy. 
NSXV_NOVA_METADATA_IPS # IP addresses used by Nova metadata service. NSXV_NOVA_METADATA_PORT # TCP Port used by Nova metadata server. NSXV_MGT_NET_MOID # Network ID for management network connectivity vmware-nsx-12.0.1/devstack/localrc_nsx_v30000666000175100017510000000111313244523345020377 0ustar zuulzuul00000000000000enable_plugin vmware-nsx https://git.openstack.org/openstack/vmware-nsx ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,n-cauth,horizon,mysql,rabbit,sysstat,quantum,q-svc,q-dhcp,n-novnc,n-xvnc DATABASE_PASSWORD=password RABBIT_PASSWORD=password SERVICE_TOKEN=password SERVICE_PASSWORD=password ADMIN_PASSWORD=password Q_PLUGIN=vmware_nsx_v3 NSX_PASSWORD=Admin!23Admin DEFAULT_OVERLAY_TZ_UUID= EDGE_CLUSTER_UUID= NSX_MANAGER= NSX_CONTROLLERS= DHCP_PROFILE_UUID= METADATA_PROXY_UUID= DHCP_RELAY_SERVICE= vmware-nsx-12.0.1/devstack/lib/0000775000175100017510000000000013244524600016300 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/devstack/lib/vmware_nsx_v30000666000175100017510000002365613244523345021047 0ustar zuulzuul00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Neutron VMware NSX plugin # ------------------------- # Settings previously defined in devstack:lib/neutron-legacy NEUTRON_CONF_DIR=/etc/neutron export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini # The interface which has connectivity to the NSX Gateway uplink NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-} # Override default 'True' in devstack:lib/neutron_plugins/services/l3 Q_USE_PROVIDERNET_FOR_PUBLIC=False # Native support from platform NATIVE_DHCP_METADATA=${NATIVE_DHCP_METADATA:-True} NATIVE_METADATA_ROUTE=${NATIVE_METADATA_ROUTE:-169.254.169.254/31} METADATA_PROXY_SHARED_SECRET=${METADATA_PROXY_SHARED_SECRET:-} # Save trace setting NSX_XTRACE=$(set +o | grep xtrace) set +o xtrace # File to store client certificate and PK CLIENT_CERT_FILE=${DEST}/data/neutron/client.pem source $TOP_DIR/lib/neutron_plugins/ovs_base dir=${GITDIR['vmware-nsx']}/devstack source $dir/lib/nsx_common function _version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; } function _ovsdb_connection { managers=(${NSX_MANAGER//,/ }) NSX_MGR_IP=${managers[0]} NSX_VER=$(curl -1 -s -k -u "$NSX_USER:$NSX_PASSWORD" -H 'Accept: application/json' https://$NSX_MGR_IP/api/v1/node | python -c 'import sys, json; print json.load(sys.stdin)["node_version"][:5]') if [ $(_version $NSX_VER) -ge $(_version 1.1.0) ]; then echo "unix:/var/run/vmware/nsx-agent/nsxagent_ovsdb.sock" else echo "tcp:127.0.0.1:6632" fi } function setup_integration_bridge { die_if_not_set $LINENO NSX_MANAGER "NSX_MANAGER has not been set!" die_if_not_set $LINENO NSX_USER "NSX_USER has not been set!" die_if_not_set $LINENO NSX_PASSWORD "NSX_PASSWORD has not been set!" 
# Ensure that the OVS params are set for the OVS utils iniset $NEUTRON_CONF DEFAULT ovs_integration_bridge $OVS_BRIDGE iniset $NEUTRON_CONF OVS ovsdb_connection $(_ovsdb_connection) iniset $NEUTRON_CONF OVS ovsdb_interface vsctl _neutron_ovs_base_setup_bridge $OVS_BRIDGE sudo ovs-vsctl set bridge $OVS_BRIDGE external_ids:bridge-id=nsx-managed sudo ovs-vsctl set-manager $(_ovsdb_connection) } function is_neutron_ovs_base_plugin { # This allows the deployer to decide whether devstack should install OVS. # By default, we install OVS, to change this behavior add "OVS_BASE=1" to your localrc file. # Note: Any KVM compute must have OVS installed on it. return ${OVS_BASE:-0} } function neutron_plugin_create_nova_conf { if [[ "$VIRT_DRIVER" != 'vsphere' ]]; then # if n-cpu or octavia is enabled, then setup integration bridge if is_service_enabled n-cpu || is_service_enabled octavia ; then setup_integration_bridge if is_service_enabled n-cpu ; then iniset $NOVA_CONF neutron ovs_bridge $OVS_BRIDGE fi fi fi # if n-api is enabled, then setup the metadata_proxy_shared_secret if is_service_enabled n-api; then iniset $NOVA_CONF neutron service_metadata_proxy True if [[ "$NATIVE_DHCP_METADATA" == "True" ]]; then iniset $NOVA_CONF neutron metadata_proxy_shared_secret $METADATA_PROXY_SHARED_SECRET if [[ "$METADATA_PROXY_USE_HTTPS" == "True" ]]; then iniset $NOVA_CONF DEFAULT enabled_ssl_apis metadata if [[ "$METADATA_PROXY_CERT_FILE" != "" ]]; then iniset $NOVA_CONF wsgi ssl_cert_file $METADATA_PROXY_CERT_FILE fi if [[ "$METADATA_PROXY_PRIV_KEY_FILE" != "" ]]; then iniset $NOVA_CONF wsgi ssl_key_file $METADATA_PROXY_PRIV_KEY_FILE fi fi fi fi } function neutron_plugin_install_agent_packages { # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages } function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_PLUGIN_SRC_CONF_PATH=vmware-nsx/etc 
VMWARE_NSX_DIR=vmware-nsx # Uses oslo config generator to generate sample configuration file (cd $DEST/$VMWARE_NSX_DIR && exec ./tools/generate_config_file_samples.sh) mkdir -p /$Q_PLUGIN_CONF_PATH cp $DEST/$Q_PLUGIN_SRC_CONF_PATH/nsx.ini.sample /$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR/policy.d cp -vr $DEST/$Q_PLUGIN_SRC_CONF_PATH/policy.d/* $NEUTRON_CONF_DIR/policy.d/ Q_PLUGIN_CLASS="vmware_nsxv3" } function neutron_plugin_configure_debug_command { sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "$PUBLIC_BRIDGE" } function neutron_plugin_configure_dhcp_agent { setup_integration_bridge iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_integration_bridge $OVS_BRIDGE iniset $Q_DHCP_CONF_FILE OVS ovsdb_connection $(_ovsdb_connection) iniset $Q_DHCP_CONF_FILE OVS ovsdb_interface vsctl } function neutron_plugin_configure_l3_agent { # VMware NSX plugin does not run L3 agent die $LINENO "q-l3 should not be executed with VMware NSX plugin!" } function neutron_plugin_configure_plugin_agent { # VMware NSX plugin does not run L2 agent die $LINENO "q-agt must not be executed with VMware NSX plugin!" } function neutron_plugin_configure_service { nsxv3_configure_service iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_extension_drivers vmware_nsxv3_dns } function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } function neutron_plugin_check_adv_test_requirements { is_service_enabled q-dhcp && return 0 } function init_vmware_nsx_v3 { if (is_service_enabled q-svc || is_service_enabled neutron-api) && [[ "$NATIVE_DHCP_METADATA" == "True" ]]; then if ! 
is_set DHCP_PROFILE_UUID; then die $LINENO "DHCP profile needs to be configured!" fi if ! is_set METADATA_PROXY_UUID; then die $LINENO "Metadata proxy needs to be configured!" fi if is_service_enabled q-dhcp q-meta; then die $LINENO "Native support does not require DHCP and Metadata agents!" fi fi # Generate client certificate if [[ "$NSX_USE_CLIENT_CERT_AUTH" == "True" ]]; then nsxadmin -o generate -r certificate fi if ! is_set NSX_GATEWAY_NETWORK_INTERFACE; then echo "NSX_GATEWAY_NETWORK_INTERFACE not set not configuring routes" return fi if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address to set on $PUBLIC_BRIDGE was not specified. " echo "Defaulting to $NSX_GATEWAY_NETWORK_CIDR" fi # Make sure the interface is up, but not configured sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up # Save and then flush the IP addresses on the interface addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE # Use the PUBLIC Bridge to route traffic to the NSX gateway # NOTE(armando-migliaccio): if running in a nested environment this will work # only with mac learning enabled, portsecurity and security profiles disabled # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off # Try to create it anyway sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE sudo ovs-vsctl --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE # Flush all existing addresses on public bridge sudo ip addr flush dev $PUBLIC_BRIDGE nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE for address in $addresses; do sudo ip addr add dev $PUBLIC_BRIDGE $address done sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR sudo ip link set $PUBLIC_BRIDGE up } function stop_vmware_nsx_v3 { # Clean 
client certificate if exists nsxadmin -o clean -r certificate if ! is_set NSX_GATEWAY_NETWORK_INTERFACE; then echo "NSX_GATEWAY_NETWORK_INTERFACE was not configured." return fi if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address expected on $PUBLIC_BRIDGE was not specified. " echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE # Save and then flush remaining addresses on the interface addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'}) sudo ip addr flush $PUBLIC_BRIDGE # Try to detach physical interface from PUBLIC_BRIDGE sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE for address in $addresses; do sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address done } # Restore xtrace $NSX_XTRACE vmware-nsx-12.0.1/devstack/lib/vmware_nsx_v0000666000175100017510000000575413244523345020763 0ustar zuulzuul00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Neutron VMware NSXv plugin # -------------------------- # Save trace setting NSXV_XTRACE=$(set +o | grep xtrace) set +o xtrace dir=${GITDIR['vmware-nsx']}/devstack source $dir/lib/nsx_common function setup_integration_bridge { : } function is_neutron_ovs_base_plugin { # NSXv does not use OVS return 1 } function neutron_plugin_create_nova_conf { if [[ -n $NSXV_NOVA_METADATA_IPS ]]; then iniset $NOVA_CONF neutron service_metadata_proxy "True" iniset $NOVA_CONF neutron metadata_proxy_shared_secret "$NSXV_METADATA_SHARED_SECRET" fi } function neutron_plugin_install_agent_packages { # NSXv does not require this : } function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_PLUGIN_SRC_CONF_PATH=vmware-nsx/etc VMWARE_NSX_DIR=vmware-nsx # Uses oslo config generator to generate sample configuration file (cd $DEST/$VMWARE_NSX_DIR && exec ./tools/generate_config_file_samples.sh) mkdir -p /$Q_PLUGIN_CONF_PATH cp $DEST/$Q_PLUGIN_SRC_CONF_PATH/nsx.ini.sample /$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR/policy.d cp -vr $DEST/$Q_PLUGIN_SRC_CONF_PATH/policy.d/* $NEUTRON_CONF_DIR/policy.d/ Q_PLUGIN_CLASS="vmware_nsxv" } function neutron_plugin_configure_debug_command { : } function neutron_plugin_configure_dhcp_agent { # VMware NSXv plugin does not run L3 agent die $LINENO "q-dhcp should not be executed with VMware NSXv plugin!" } function neutron_plugin_configure_l3_agent { # VMware NSXv plugin does not run L3 agent die $LINENO "q-l3 should not be executed with VMware NSXv plugin!" } function neutron_plugin_configure_plugin_agent { # VMware NSXv plugin does not run L2 agent die $LINENO "q-agt must not be executed with VMware NSXv plugin!" 
} function neutron_plugin_configure_service { nsxv_configure_service iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_extension_drivers vmware_nsxv_dns if [[ "$NSXV_USE_DVS_FEATURES" != "" ]]; then dvs_configure_service "$VMWAREAPI_IP" "$VMWAREAPI_USER" "$VMWAREAPI_PASSWORD" "$VMWAREAPI_CA_FILE" "$VMWAREAPI_INSECURE" "$VMWARE_DVS_NAME" fi } function neutron_plugin_setup_interface_driver { : } function neutron_plugin_check_adv_test_requirements { return 0 } # Restore xtrace $NSXV_XTRACE vmware-nsx-12.0.1/devstack/lib/vmware_nsx_tvd0000666000175100017510000002454313244523345021310 0ustar zuulzuul00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Neutron VMware NSX plugin # ------------------------- # Settings previously defined in devstack:lib/neutron-legacy NEUTRON_CONF_DIR=/etc/neutron export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini # The interface which has connectivity to the NSX Gateway uplink NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-} # Override default 'True' in devstack:lib/neutron_plugins/services/l3 Q_USE_PROVIDERNET_FOR_PUBLIC=False # Native support from platform NATIVE_DHCP_METADATA=${NATIVE_DHCP_METADATA:-True} NATIVE_METADATA_ROUTE=${NATIVE_METADATA_ROUTE:-169.254.169.254/31} METADATA_PROXY_SHARED_SECRET=${METADATA_PROXY_SHARED_SECRET:-} # Save trace setting NSX_XTRACE=$(set +o | grep xtrace) set +o xtrace # File to store client certificate and PK CLIENT_CERT_FILE=${DEST}/data/neutron/client.pem source $TOP_DIR/lib/neutron_plugins/ovs_base dir=${GITDIR['vmware-nsx']}/devstack source $dir/lib/nsx_common function _version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; } function _ovsdb_connection { managers=(${NSX_MANAGER//,/ }) NSX_MGR_IP=${managers[0]} NSX_VER=$(curl -1 -s -k -u "$NSX_USER:$NSX_PASSWORD" -H 'Accept: application/json' https://$NSX_MGR_IP/api/v1/node | python -c 'import sys, json; print json.load(sys.stdin)["node_version"][:5]') if [ $(_version $NSX_VER) -ge $(_version 1.1.0) ]; then echo "unix:/var/run/vmware/nsx-agent/nsxagent_ovsdb.sock" else echo "tcp:127.0.0.1:6632" fi } function setup_integration_bridge { die_if_not_set $LINENO NSX_MANAGER "NSX_MANAGER has not been set!" die_if_not_set $LINENO NSX_USER "NSX_USER has not been set!" die_if_not_set $LINENO NSX_PASSWORD "NSX_PASSWORD has not been set!" 
# Ensure that the OVS params are set for the OVS utils iniset $NEUTRON_CONF DEFAULT ovs_integration_bridge $OVS_BRIDGE iniset $NEUTRON_CONF OVS ovsdb_connection $(_ovsdb_connection) iniset $NEUTRON_CONF OVS ovsdb_interface vsctl _neutron_ovs_base_setup_bridge $OVS_BRIDGE sudo ovs-vsctl set bridge $OVS_BRIDGE external_ids:bridge-id=nsx-managed sudo ovs-vsctl set-manager $(_ovsdb_connection) } function is_neutron_ovs_base_plugin { # This allows the deployer to decide whether devstack should install OVS. # By default, we install OVS, to change this behavior add "OVS_BASE=1" to your localrc file. # Note: Any KVM compute must have OVS installed on it. return ${OVS_BASE:-0} } function neutron_plugin_create_nova_conf { if [[ "$VIRT_DRIVER" != 'vsphere' ]]; then # if n-cpu or octavia is enabled, then setup integration bridge if is_service_enabled n-cpu || is_service_enabled octavia ; then setup_integration_bridge if is_service_enabled n-cpu ; then iniset $NOVA_CONF neutron ovs_bridge $OVS_BRIDGE fi fi fi # if n-api is enabled, then setup the metadata_proxy_shared_secret if is_service_enabled n-api; then iniset $NOVA_CONF neutron service_metadata_proxy True if [[ "$NATIVE_DHCP_METADATA" == "True" ]]; then iniset $NOVA_CONF neutron metadata_proxy_shared_secret $METADATA_PROXY_SHARED_SECRET if [[ "$METADATA_PROXY_USE_HTTPS" == "True" ]]; then iniset $NOVA_CONF DEFAULT enabled_ssl_apis metadata if [[ "$METADATA_PROXY_CERT_FILE" != "" ]]; then iniset $NOVA_CONF wsgi ssl_cert_file $METADATA_PROXY_CERT_FILE fi if [[ "$METADATA_PROXY_PRIV_KEY_FILE" != "" ]]; then iniset $NOVA_CONF wsgi ssl_key_file $METADATA_PROXY_PRIV_KEY_FILE fi fi fi fi } function neutron_plugin_install_agent_packages { # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages } function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_PLUGIN_SRC_CONF_PATH=vmware-nsx/etc 
VMWARE_NSX_DIR=vmware-nsx # Uses oslo config generator to generate sample configuration file (cd $DEST/$VMWARE_NSX_DIR && exec ./tools/generate_config_file_samples.sh) mkdir -p /$Q_PLUGIN_CONF_PATH cp $DEST/$Q_PLUGIN_SRC_CONF_PATH/nsx.ini.sample /$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR/policy.d cp -vr $DEST/$Q_PLUGIN_SRC_CONF_PATH/policy.d/* $NEUTRON_CONF_DIR/policy.d/ Q_PLUGIN_CLASS="vmware_nsxtvd" } function neutron_plugin_configure_debug_command { sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "$PUBLIC_BRIDGE" } function neutron_plugin_configure_dhcp_agent { setup_integration_bridge iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_integration_bridge $OVS_BRIDGE iniset $Q_DHCP_CONF_FILE OVS ovsdb_connection $(_ovsdb_connection) iniset $Q_DHCP_CONF_FILE OVS ovsdb_interface vsctl } function neutron_plugin_configure_l3_agent { # VMware NSX plugin does not run L3 agent die $LINENO "q-l3 should not be executed with VMware NSX plugin!" } function neutron_plugin_configure_plugin_agent { # VMware NSX plugin does not run L2 agent die $LINENO "q-agt must not be executed with VMware NSX plugin!" 
} function neutron_plugin_configure_service { nsxv3_configure_service nsxv_configure_service dvs_configure_service "$DVS_VMWAREAPI_IP" "$DVS_VMWAREAPI_USER" "$DVS_VMWAREAPI_PASSWORD" "$DVS_VMWAREAPI_CA_FILE" "$DVS_VMWAREAPI_INSECURE" "$VMWARE_DVS_NAME" iniset /$Q_PLUGIN_CONF_FILE nsx_tvd nsx_v_extension_drivers vmware_nsxv_dns iniset /$Q_PLUGIN_CONF_FILE nsx_tvd nsx_v3_extension_drivers vmware_nsxv3_dns iniset /$Q_PLUGIN_CONF_FILE nsx_tvd dvs_extension_drivers vmware_dvs_dns iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_availability_zones $NSX_DEFAULT_AZ } function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } function neutron_plugin_check_adv_test_requirements { is_service_enabled q-dhcp && return 0 } function init_vmware_nsx_tvd { if (is_service_enabled q-svc || is_service_enabled neutron-api) && [[ "$NATIVE_DHCP_METADATA" == "True" ]]; then if ! is_set DHCP_PROFILE_UUID; then die $LINENO "DHCP profile needs to be configured!" fi if ! is_set METADATA_PROXY_UUID; then die $LINENO "Metadata proxy needs to be configured!" fi if is_service_enabled q-dhcp q-meta; then die $LINENO "Native support does not require DHCP and Metadata agents!" fi fi # Generate client certificate if [[ "$NSX_USE_CLIENT_CERT_AUTH" == "True" ]]; then nsxadmin -o generate -r certificate fi if ! is_set NSX_GATEWAY_NETWORK_INTERFACE; then echo "NSX_GATEWAY_NETWORK_INTERFACE not set not configuring routes" return fi if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address to set on $PUBLIC_BRIDGE was not specified. 
" echo "Defaulting to $NSX_GATEWAY_NETWORK_CIDR" fi # Make sure the interface is up, but not configured sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up # Save and then flush the IP addresses on the interface addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE # Use the PUBLIC Bridge to route traffic to the NSX gateway # NOTE(armando-migliaccio): if running in a nested environment this will work # only with mac learning enabled, portsecurity and security profiles disabled # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off # Try to create it anyway sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE sudo ovs-vsctl --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE # Flush all existing addresses on public bridge sudo ip addr flush dev $PUBLIC_BRIDGE nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE for address in $addresses; do sudo ip addr add dev $PUBLIC_BRIDGE $address done sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR sudo ip link set $PUBLIC_BRIDGE up } function stop_vmware_nsx_tvd { # Clean client certificate if exists nsxadmin -o clean -r certificate if ! is_set NSX_GATEWAY_NETWORK_INTERFACE; then echo "NSX_GATEWAY_NETWORK_INTERFACE was not configured." return fi if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address expected on $PUBLIC_BRIDGE was not specified. 
" echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE # Save and then flush remaining addresses on the interface addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'}) sudo ip addr flush $PUBLIC_BRIDGE # Try to detach physical interface from PUBLIC_BRIDGE sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE for address in $addresses; do sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address done } # Restore xtrace $NSX_XTRACE vmware-nsx-12.0.1/devstack/lib/vmware_dvs0000666000175100017510000000725713244523345020422 0ustar zuulzuul00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Neutron VMware DVS plugin # ------------------------- # Settings previously defined in devstack:lib/neutron-legacy NEUTRON_CONF_DIR=/etc/neutron export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini # Save trace setting DVS_XTRACE=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base source $dir/lib/nsx_common DVS_BRIDGE=${DVS_BRIDGE:-br-dvs} DVS_INTERFACE=${DVS_INTERFACE:-eth1} function setup_integration_bridge { # remove integration bridge created by Neutron for bridge in $(sudo ovs-vsctl list-br | grep -o -e $DVS_BRIDGE); do sudo ovs-vsctl del-br ${bridge} done _neutron_ovs_base_setup_bridge $DVS_BRIDGE sudo ovs-vsctl add-port $DVS_BRIDGE $DVS_INTERFACE } function is_neutron_ovs_base_plugin { # DVS uses OVS, but not the l3-agent return 0 } function neutron_plugin_create_nova_conf { # if n-cpu is enabled, then setup integration bridge if is_service_enabled n-cpu; then setup_integration_bridge fi } function neutron_plugin_install_agent_packages { # VMware DVS Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages } function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_PLUGIN_SRC_CONF_PATH=vmware-nsx/etc VMWARE_NSX_DIR=vmware-nsx # Uses oslo config generator to generate sample configuration file (cd $DEST/$VMWARE_NSX_DIR && exec ./tools/generate_config_file_samples.sh) mkdir -p /$Q_PLUGIN_CONF_PATH cp $DEST/$Q_PLUGIN_SRC_CONF_PATH/nsx.ini.sample /$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME Q_PLUGIN_CLASS="vmware_dvs" } function neutron_plugin_configure_debug_command { # TBD (garyk) : } function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_integration_bridge $OVS_BRIDGE 
iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver "vmware_nsx.plugins.dvs.dhcp.Dnsmasq" } function neutron_plugin_configure_l3_agent { # VMware DVS plugin does not run L3 agent die $LINENO "q-l3 should not be executed with VMware DVS plugin!" } function neutron_plugin_configure_plugin_agent { # VMware DVS plugin does not run L2 agent die $LINENO "q-agt must not be executed with VMware DVS plugin!" } function neutron_plugin_configure_service { dvs_configure_service "$VMWAREAPI_IP" "$VMWAREAPI_USER" "$VMWAREAPI_PASSWORD" "$VMWAREAPI_CA_FILE" "$VMWAREAPI_INSECURE" "$VMWARE_DVS_NAME" iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_extension_drivers vmware_dvs_dns } function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } function neutron_plugin_check_adv_test_requirements { is_service_enabled q-dhcp && return 0 } # Restore xtrace $DVS_XTRACE vmware-nsx-12.0.1/devstack/lib/vmware_nsx0000666000175100017510000002121413244523345020423 0ustar zuulzuul00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Neutron VMware NSX plugin # ------------------------- # Settings previously defined in devstack:lib/neutron-legacy NEUTRON_CONF_DIR=/etc/neutron export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST} # Save trace setting NSX_XTRACE=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base function setup_integration_bridge { _neutron_ovs_base_setup_bridge $OVS_BRIDGE # Set manager to NSX controller (1st of list) if [[ "$NSX_CONTROLLERS" != "" ]]; then # Get the first controller controllers=(${NSX_CONTROLLERS//,/ }) OVS_MGR_IP=${controllers[0]} else die $LINENO "Error - No controller specified. Unable to set a manager for OVS" fi sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP } function is_neutron_ovs_base_plugin { # NSX uses OVS, but not the l3-agent return 0 } function neutron_plugin_create_nova_conf { # if n-cpu is enabled, then setup integration bridge if is_service_enabled n-cpu; then setup_integration_bridge fi } function neutron_plugin_install_agent_packages { # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages } function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_PLUGIN_SRC_CONF_PATH=vmware-nsx/etc VMWARE_NSX_DIR=vmware-nsx # Uses oslo config generator to generate sample configuration file (cd $DEST/$VMWARE_NSX_DIR && exec ./tools/generate_config_file_samples.sh) mkdir -p /$Q_PLUGIN_CONF_PATH cp $DEST/$Q_PLUGIN_SRC_CONF_PATH/nsx.ini.sample /$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME Q_PLUGIN_CLASS="vmware_nsx" } function neutron_plugin_configure_debug_command { sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "$PUBLIC_BRIDGE" } function neutron_plugin_configure_dhcp_agent { 
setup_integration_bridge iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_integration_bridge $OVS_BRIDGE } function neutron_plugin_configure_l3_agent { # VMware NSX plugin does not run L3 agent die $LINENO "q-l3 should not be executed with VMware NSX plugin!" } function neutron_plugin_configure_plugin_agent { # VMware NSX plugin does not run L2 agent die $LINENO "q-agt must not be executed with VMware NSX plugin!" } function neutron_plugin_configure_service { if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS fi if [[ "$MAX_LP_PER_OVERLAY_LS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS fi if [[ "$FAILOVER_TIME" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx failover_time $FAILOVER_TIME fi if [[ "$CONCURRENT_CONNECTIONS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx concurrent_connections $CONCURRENT_CONNECTIONS fi if [[ "$DEFAULT_TZ_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID else die $LINENO "The VMware NSX plugin won't work without a default transport zone." fi if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True iniset /$Q_PLUGIN_CONF_FILE nsx metadata_mode access_network fi if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID fi # NSX_CONTROLLERS must be a comma separated string if [[ "$NSX_CONTROLLERS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_controllers $NSX_CONTROLLERS else die $LINENO "The VMware NSX plugin needs at least an NSX controller." 
fi if [[ "$NSX_USER" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_user $NSX_USER fi if [[ "$NSX_PASSWORD" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_password $NSX_PASSWORD fi if [[ "$NSX_HTTP_TIMEOUT" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NSX_HTTP_TIMEOUT fi if [[ "$NSX_RETRIES" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NSX_RETRIES fi if [[ "$NSX_REDIRECTS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NSX_REDIRECTS fi if [[ "$AGENT_MODE" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx agent_mode $AGENT_MODE if [[ "$AGENT_MODE" == "agentless" ]]; then if [[ "$DEFAULT_SERVICE_CLUSTER_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_service_cluster_uuid $DEFAULT_SERVICE_CLUSTER_UUID else die $LINENO "Agentless mode requires a service cluster." fi iniset /$Q_PLUGIN_CONF_FILE nsx_metadata metadata_server_address $Q_META_DATA_IP fi fi } function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } function neutron_plugin_check_adv_test_requirements { is_service_enabled q-dhcp && return 0 } function init_vmware_nsx { if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address to set on $PUBLIC_BRIDGE was not specified. 
" echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi # Make sure the interface is up, but not configured sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up # Save and then flush the IP addresses on the interface addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE # Use the PUBLIC Bridge to route traffic to the NSX gateway # NOTE(armando-migliaccio): if running in a nested environment this will work # only with mac learning enabled, portsecurity and security profiles disabled # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off # Try to create it anyway sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE sudo ovs-vsctl --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE # Flush all existing addresses on public bridge sudo ip addr flush dev $PUBLIC_BRIDGE nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE for address in $addresses; do sudo ip addr add dev $PUBLIC_BRIDGE $address done sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR sudo ip link set $PUBLIC_BRIDGE up } function stop_vmware_nsx { if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address expected on $PUBLIC_BRIDGE was not specified. 
" echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE # Save and then flush remaining addresses on the interface addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'}) sudo ip addr flush $PUBLIC_BRIDGE # Try to detach physical interface from PUBLIC_BRIDGE sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE for address in $addresses; do sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address done } function check_vmware_nsx { neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini } # Restore xtrace $NSX_XTRACE vmware-nsx-12.0.1/devstack/lib/nsx_common0000666000175100017510000001242613244523345020417 0ustar zuulzuul00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Common VMware NSXv and NSXv3 plugin # ----------------------------------- # ensure we don't re-source this in the same environment [[ -z "$_NSX_COMMON" ]] || return 0 declare -r -g _NSX_COMMON=1 function _nsxv_ini_set { if [[ $2 != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsxv $1 $2 fi } function nsxv_configure_service { if [[ "$NSX_L2GW_DRIVER" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_l2gw_driver $NSX_L2GW_DRIVER fi _nsxv_ini_set password "$NSXV_PASSWORD" _nsxv_ini_set user "$NSXV_USER" _nsxv_ini_set vdn_scope_id "$NSXV_VDN_SCOPE_ID" _nsxv_ini_set dvs_id "$NSXV_DVS_ID" _nsxv_ini_set manager_uri "$NSXV_MANAGER_URI" _nsxv_ini_set ca_file "$NSXV_CA_FILE" _nsxv_ini_set insecure "$NSXV_INSECURE" _nsxv_ini_set datacenter_moid "$NSXV_DATACENTER_MOID" _nsxv_ini_set datastore_id "$NSXV_DATASTORE_ID" _nsxv_ini_set resource_pool_id "$NSXV_RESOURCE_POOL_ID" _nsxv_ini_set availability_zones "$NSXV_AVAILABILITY_ZONES" _nsxv_ini_set external_network "$NSXV_EXTERNAL_NETWORK" _nsxv_ini_set cluster_moid "$NSXV_CLUSTER_MOID" _nsxv_ini_set backup_edge_pool "$NSXV_BACKUP_POOL" _nsxv_ini_set mgt_net_proxy_ips "$NSXV_MGT_NET_PROXY_IPS" _nsxv_ini_set mgt_net_moid "$NSXV_MGT_NET_MOID" _nsxv_ini_set mgt_net_proxy_netmask "$NSXV_MGT_NET_PROXY_NETMASK" _nsxv_ini_set nova_metadata_port "$NSXV_NOVA_METADATA_PORT" _nsxv_ini_set nova_metadata_ips "$NSXV_NOVA_METADATA_IPS" _nsxv_ini_set metadata_shared_secret "$NSXV_METADATA_SHARED_SECRET" _nsxv_ini_set metadata_insecure "$NSXV_METADATA_INSECURE" _nsxv_ini_set metadata_nova_client_cert "$NSXV_METADATA_NOVA_CERT" _nsxv_ini_set metadata_nova_client_priv_key "$NSXV_METADATA_NOVA_PRIV_KEY" _nsxv_ini_set metadata_service_allowed_ports "$NSXV_METADATA_SERVICE_ALLOWED_PORTS" _nsxv_ini_set edge_ha "$NSXV_EDGE_HA" _nsxv_ini_set exclusive_router_appliance_size "$NSXV_EXCLUSIVE_ROUTER_APPLIANCE_SIZE" _nsxv_ini_set use_dvs_features "$NSXV_USE_DVS_FEATURES" _nsxv_ini_set use_nsx_policies "$NSXV_USE_NSX_POLICIES" _nsxv_ini_set default_policy_id 
"$NSXV_DEFAULT_POLICY_ID" _nsxv_ini_set allow_tenant_rules_with_policy "$NSXV_ALLOW_TENANT_RULES_WITH_POLICY" } function _dvs_ini_set { if [[ $2 != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE dvs $1 $2 fi } function dvs_configure_service { _dvs_ini_set host_ip $1 _dvs_ini_set host_username $2 _dvs_ini_set host_password $3 _dvs_ini_set ca_file $4 _dvs_ini_set insecure $5 _dvs_ini_set dvs_name $6 } function _nsxv3_ini_set { if [[ -z $1 || -z $2 ]]; then if [[ $3 != "" ]]; then die $LINENO $3 fi fi if [[ $2 != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx_v3 $1 $2 fi } function nsxv3_configure_service { _nsxv3_ini_set default_overlay_tz $DEFAULT_OVERLAY_TZ_UUID "The VMware NSX plugin won't work without a default transport zone." _nsxv3_ini_set default_vlan_tz $DEFAULT_VLAN_TZ_UUID if [[ "$DEFAULT_TIER0_ROUTER_UUID" != "" ]]; then _nsxv3_ini_set default_tier0_router $DEFAULT_TIER0_ROUTER_UUID Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True fi # NSX_MANAGER must be a comma separated string if [[ "$NSX_MANAGERS" != "" ]]; then _nsxv3_ini_set nsx_api_managers $NSX_MANAGERS elif [[ "$NSX_MANAGER" != "" ]]; then _nsxv3_ini_set nsx_api_managers $NSX_MANAGER else die $LINENO "The VMware NSX plugin needs at least one NSX manager." 
fi if [[ "$NSX_L2GW_DRIVER" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_l2gw_driver $NSX_L2GW_DRIVER fi _nsxv3_ini_set ens_support $ENS_SUPPORT _nsxv3_ini_set nsx_api_user $NSX_USER _nsxv3_ini_set nsx_api_password $NSX_PASSWORD _nsxv3_ini_set retries $NSX_RETRIES _nsxv3_ini_set insecure $NSX_INSECURE _nsxv3_ini_set ca_file $NSX_CA_FILE _nsxv3_ini_set default_bridge_cluster $DEFAULT_BRIDGE_CLUSTER_UUID _nsxv3_ini_set native_dhcp_metadata $NATIVE_DHCP_METADATA if [[ "$NATIVE_DHCP_METADATA" == "True" ]]; then _nsxv3_ini_set native_metadata_route $NATIVE_METADATA_ROUTE _nsxv3_ini_set dhcp_profile $DHCP_PROFILE_UUID _nsxv3_ini_set metadata_proxy $METADATA_PROXY_UUID _nsxv3_ini_set dhcp_relay_service $DHCP_RELAY_SERVICE iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification False fi if [[ "$NSX_USE_CLIENT_CERT_AUTH" == "True" ]]; then _nsxv3_ini_set nsx_use_client_auth "True" _nsxv3_ini_set nsx_client_cert_file "$CLIENT_CERT_FILE" _nsxv3_ini_set nsx_client_cert_storage "nsx-db" _nsxv3_ini_set nsx_client_cert_pk_password "openstack" fi } vmware-nsx-12.0.1/devstack/nsx_v3/0000775000175100017510000000000013244524600016752 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/devstack/nsx_v3/kvm_compute_local.conf.sample0000666000175100017510000000202113244523345024606 0ustar zuulzuul00000000000000[[post-config|$NOVA_CONF]] [neutron] ovs_bridge=nsxvswitch [[local|localrc]] =https://git.openstack.org/ enable_plugin vmware-nsx Q_PLUGIN=vmware_nsx_v3 ENABLED_SERVICES=n-cpu,neutron SERVICE_HOST= # OpenStack controller node IP MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST Q_HOST=$SERVICE_HOST DATABASE_PASSWORD=openstack RABBIT_PASSWORD=openstack SERVICE_TOKEN=openstack SERVICE_PASSWORD=openstack ADMIN_PASSWORD=openstack RECLONE=no OVS_BRIDGE=nsxvswitch IPV6_ENABLED=False IP_VERSION=4 HOST_IP= # OpenStack compute node IP MULTI_HOST=1 NOVA_VNC_ENABLED=True NOVNCPROXY_URL="" VNCSERVER_LISTEN=$HOST_IP VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN disable_service zookeeper 
#DEBUG=False #DEFAULT_VLAN_TZ_UUID=changeme # Optional, for VLAN provider networks # Enable Logging LOGFILE=/opt/stack/logs/stack.sh.log VERBOSE=True LOG_COLOR=True NSX_MANAGER= NSX_USER= NSX_PASSWORD= vmware-nsx-12.0.1/devstack/nsx_v3/controller_local.conf.sample0000666000175100017510000000575013244523345024454 0ustar zuulzuul00000000000000[[post-extra|$TEMPEST_CONFIG]] [nsxv3] nsx_manager= nsx_user= nsx_password= [[post-config|$NOVA_CONF]] [DEFAULT] image_handlers=vmware_copy, vmware_download force_config_drive = False [vmware] task_poll_interval=0.5 use_linked_clone=false insecure = true datastore_regex = vdnet* [[local|localrc]] DATABASE_PASSWORD=openstack ADMIN_PASSWORD=openstack SERVICE_PASSWORD=openstack SERVICE_TOKEN=openstack RABBIT_PASSWORD=openstack # Enable Logging LOGFILE=/opt/stack/logs/stack.sh.log VERBOSE=True LOG_COLOR=True RECLONE=True VIRT_DRIVER=vsphere CINDER_DRIVER=vsphere CINDER_ENABLED_BACKENDS=vsphere VMWAREAPI_IP= VMWAREAPI_USER= VMWAREAPI_PASSWORD= VMWAREAPI_CLUSTER= # Use IPv4 only IP_VERSION=4 # Pre-requisite ENABLED_SERVICES=rabbit,mysql,key # Horizon (Dashboard UI) ENABLED_SERVICES+=,horizon #HORIZON_REPO=https://github.com/openstack/horizon # Nova - Compute Service ENABLED_SERVICES+=,n-api,n-crt,n-obj,n-cpu,n-cond,n-sch # Nova Network - If you don't want to use Neutron and need a simple network setup (old good stuff!) 
#ENABLED_SERVICES+=,n-net ## Nova Cells #ENABLED_SERVICES+=,n-cell # VNC server ENABLED_SERVICES+=,n-novnc,n-xvnc,n-cauth # Glance - Image Service ENABLED_SERVICES+=,g-api,g-reg # Tempest ENABLED_SERVICES+=,tempest # Swift - Object Storage #ENABLED_SERVICES+=,s-proxy,s-object,s-container,s-account # Neutron - Networking Service # If Neutron is not declared the old good nova-network will be used # If use agent to provider DHCP and metadata #ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,neutron # If use native DHCP support from NSX, q-dhcp & q-meta shouldn't be enabled ENABLED_SERVICES+=,q-svc,neutron ## Neutron - Load Balancing #ENABLED_SERVICES+=,q-lbaas ## Neutron - VPN as a Service #ENABLED_SERVICES+=,q-vpn ## Neutron - Firewall as a Service #ENABLED_SERVICES+=,q-fwaas # Cinder - Block Device Service #ENABLED_SERVICES+=,cinder,c-api,c-vol,c-sch,c-bak # Apache fronted for WSGI APACHE_ENABLED_SERVICES+=keystone,swift # Enable NSX-T plugin stable/liberty branch enable_plugin vmware-nsx Q_PLUGIN=vmware_nsx_v3 # Defatult vlan transport zone for provider network DEFAULT_VLAN_TZ_UUID= # Defatult overlay transport zone fro tenant network DEFAULT_OVERLAY_TZ_UUID= NSX_MANAGER= OVS_BRIDGE=nsxvswitch NSX_USER= NSX_PASSWORD= # Default tier0 uuid which is created by admin DEFAULT_TIER0_ROUTER_UUID= # Default Edge cluster uuid DEFAULT_EDGE_CLUSTER_UUID= # Enabled native DHCP support from NSX backend DHCP_PROFILE_UUID= DHCP_RELAY_SERVICE= METADATA_PROXY_UUID= METADATA_PROXY_SHARED_SECRET= METADATA_PROXY_USE_HTTPS=False METADATA_PROXY_CERT_FILE= METADATA_PROXY_PRIV_KEY_FILE= NATIVE_DHCP_METADATA=True vmware-nsx-12.0.1/devstack/nsx_v3/devstackgaterc0000666000175100017510000000375413244523345021707 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file is sourced by the NSX-v3 CI to run selective set of tests # based on the features that are ready to be tested. # Begin list of exclusions. r="^(?!.*" r="$r(?:tempest\.api\.network\.test_extensions\.ExtensionsTestJSON.*)" r="$r|(?:tempest\.api\.network\.test_routers\.DvrRoutersTest.*)" r="$r|(?:tempest\.api\.network\.test_routers_negative\.DvrRoutersNegativeTest.*)" r="$r|(?:tempest\.api\.network\.test_allowed_address_pair\.AllowedAddressPairTestJSON\.test_update_port_with_cidr_address_pair.*)" #Native DHCP has no agents r="$r|(?:tempest\.api\.network\.admin\.test_agent_management\.AgentManagementTestJSON.*)" #Can not create more than one DHCP-enabled subnet r="$r|(?:tempest\.api\.network\.test_ports\.PortsTestJSON\.test_create_update_port_with_second_ip.*)" r="$r|(?:tempest\.api\.network\.test_ports\.PortsTestJSON\.test_update_port_with_security_group_and_extra_attributes.*)" r="$r|(?:tempest\.api\.network\.test_ports\.PortsTestJSON\.test_update_port_with_two_security_groups_and_extra_attributes.*)" r="$r|(?:tempest\.api\.network\.test_extra_dhcp_options\.ExtraDHCPOptionsTestJSON\.test_.*_with_extra_dhcp_options.*)" r="$r|(?:tempest\.api\.network\.test_floating_ips\.FloatingIPTestJSON\.test_create_update_floatingip_with_port_multiple_ip_address.*)" # End list of exclusions. r="$r)" # only run tempest.api.network tests r="$r(tempest\.api\.network).*$" export DEVSTACK_GATE_TEMPEST_REGEX="$r" vmware-nsx-12.0.1/devstack/plugin.sh0000666000175100017510000001036313244523345017376 0ustar zuulzuul00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. 
# # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. GITDIR['vmware-nsxlib']=$DEST/vmware-nsxlib GITREPO['vmware-nsxlib']=${NSXLIB_REPO:-${GIT_BASE}/openstack/vmware-nsxlib.git} GITBRANCH['vmware-nsxlib']=${NSXLIB_BRANCH:-master} dir=${GITDIR['vmware-nsx']}/devstack if [[ "$1" == "stack" && "$2" == "install" ]]; then if use_library_from_git 'vmware-nsxlib'; then git_clone_by_name 'vmware-nsxlib' setup_dev_lib 'vmware-nsxlib' fi setup_develop ${GITDIR['vmware-nsx']} fi if [[ $Q_PLUGIN == 'vmware_nsx_v' ]]; then source $dir/lib/vmware_nsx_v if [[ "$1" == "unstack" ]]; then db_connection=$(iniget $NEUTRON_CONF database connection) python $dir/tools/nsxv_cleanup.py --vsm-ip ${NSXV_MANAGER_URI/https:\/\/} --user $NSXV_USER --password $NSXV_PASSWORD --db-connection $db_connection elif [[ "$1" == "clean" ]]; then if is_service_enabled q-svc || is_service_enabled neutron-api; then python $dir/tools/nsxv_cleanup.py --vsm-ip ${NSXV_MANAGER_URI/https:\/\/} --user $NSXV_USER --password $NSXV_PASSWORD fi fi elif [[ $Q_PLUGIN == 'vmware_nsx' ]]; then source $dir/lib/vmware_nsx if [[ "$1" == "stack" && "$2" == "post-config" ]]; then init_vmware_nsx elif [[ "$1" == "stack" && "$2" == "extra" ]]; then check_vmware_nsx elif [[ "$1" == "unstack" ]]; then stop_vmware_nsx fi elif [[ $Q_PLUGIN == 'vmware_nsx_v3' ]]; then source $dir/lib/vmware_nsx_v3 if [[ "$1" == "stack" && "$2" == "post-config" ]]; then init_vmware_nsx_v3 elif [[ "$1" == "unstack" ]]; then db_connection=$(iniget 
$NEUTRON_CONF database connection) stop_vmware_nsx_v3 # only clean up when q-svc (legacy support) or neutron-api is enabled if is_service_enabled q-svc || is_service_enabled neutron-api; then NSX_MANAGER=${NSX_MANAGERS:-$NSX_MANAGER} IFS=',' NSX_MANAGER=($NSX_MANAGER) unset IFS python $dir/tools/nsxv3_cleanup.py --mgr-ip $NSX_MANAGER --user $NSX_USER --password $NSX_PASSWORD --db-connection $db_connection fi elif [[ "$1" == 'clean' ]]; then if is_service_enabled q-svc || is_service_enabled neutron-api; then python $dir/tools/nsxv3_cleanup.py --mgr-ip $NSX_MANAGER --user $NSX_USER --password $NSX_PASSWORD fi fi elif [[ $Q_PLUGIN == 'vmware_nsx_tvd' ]]; then source $dir/lib/vmware_nsx_tvd if [[ "$1" == "stack" && "$2" == "post-config" ]]; then init_vmware_nsx_tvd elif [[ "$1" == "unstack" ]]; then db_connection=$(iniget $NEUTRON_CONF database connection) stop_vmware_nsx_tvd # only clean up when q-svc (legacy support) or neutron-api is enabled if is_service_enabled q-svc || is_service_enabled neutron-api; then NSX_MANAGER=${NSX_MANAGERS:-$NSX_MANAGER} IFS=',' NSX_MANAGER=($NSX_MANAGER) unset IFS python $dir/tools/nsxv3_cleanup.py --mgr-ip $NSX_MANAGER --user $NSX_USER --password $NSX_PASSWORD --db-connection $db_connection python $dir/tools/nsxv_cleanup.py --vsm-ip ${NSXV_MANAGER_URI/https:\/\/} --user $NSXV_USER --password $NSXV_PASSWORD --db-connection $db_connection fi elif [[ "$1" == 'clean' ]]; then if is_service_enabled q-svc || is_service_enabled neutron-api; then python $dir/tools/nsxv3_cleanup.py --mgr-ip $NSX_MANAGER --user $NSX_USER --password $NSX_PASSWORD python $dir/tools/nsxv_cleanup.py --vsm-ip ${NSXV_MANAGER_URI/https:\/\/} --user $NSXV_USER --password $NSXV_PASSWORD fi fi elif [[ $Q_PLUGIN == 'vmware_dvs' ]]; then source $dir/lib/vmware_dvs fi vmware-nsx-12.0.1/devstack/override-defaults0000666000175100017510000000012413244523345021105 0ustar zuulzuul00000000000000function has_neutron_plugin_security_group { # 0 means True here return 0 } 
vmware-nsx-12.0.1/devstack/settings0000666000175100017510000000172713244523345017333 0ustar zuulzuul00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. NSX_XTRACE=$(set +o | grep xtrace) set +o xtrace if [[ $Q_PLUGIN == 'vmware_nsx' ]]; then NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-eth2} # Re-declare floating range as it's needed also in stop_vmware_nsx, which # is invoked by unstack.sh FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} fi $NSX_XTRACE vmware-nsx-12.0.1/devstack/tools/0000775000175100017510000000000013244524600016672 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/devstack/tools/nsxv_edge_resources.py0000666000175100017510000000746013244523345023336 0ustar zuulzuul00000000000000#!/usr/bin/env python # Copyright 2015 VMware Inc # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Purpose: Configure edge resource limits Usage: python nsxv_edge_resources.py --vsm-ip --username --password """ import base64 import optparse import xml.etree.ElementTree as et from oslo_serialization import jsonutils import requests import six requests.packages.urllib3.disable_warnings() class NSXClient(object): def __init__(self, host, username, password, *args, **kwargs): self._host = host self._username = username self._password = password def _get_headers(self, format): auth_cred = self._username + ":" + self._password auth = base64.b64encode(auth_cred) headers = {} headers['Authorization'] = "Basic %s" % auth headers['Content-Type'] = "application/%s" % format headers['Accept'] = "application/%s" % format return headers def _get_url(self, uri): return 'https://%s/%s' % (self._host, uri) def _get(self, format, uri): headers = self._get_headers(format) url = self._get_url(uri) response = requests.get(url, headers=headers, verify=False) return response def _put(self, format, uri, data): headers = self._get_headers(format) url = self._get_url(uri) response = requests.put(url, headers=headers, verify=False, data=data) return response def _get_tuning_configration(self): response = self._get("json", "/api/4.0/edgePublish/tuningConfiguration") return jsonutils.loads(response.text) def configure_reservations(self): config = self._get_tuning_configration() # NSX only receive XML format for the resource allocation update tuning = et.Element('tuningConfiguration') for opt, val in six.iteritems(config): child = et.Element(opt) if (opt == 'edgeVCpuReservationPercentage' or opt == 'edgeMemoryReservationPercentage'): child.text = '0' elif opt == 'megaHertzPerVCpu': child.text = '1500' else: child.text = str(val) tuning.append(child) self._put("xml", "/api/4.0/edgePublish/tuningConfiguration", et.tostring(tuning)) print("Edge resource limits set") if __name__ == "__main__": parser = optparse.OptionParser() parser.add_option("--vsm-ip", dest="vsm_ip", help="NSX Manager IP 
address") parser.add_option("-u", "--username", default="admin", dest="username", help="NSX Manager username") parser.add_option("-p", "--password", default="default", dest="password", help="NSX Manager password") (options, args) = parser.parse_args() print("vsm-ip: %s" % options.vsm_ip) print("username: %s" % options.username) print("password: %s" % options.password) nsx_client = NSXClient(options.vsm_ip, options.username, options.password) nsx_client.configure_reservations() vmware-nsx-12.0.1/devstack/tools/nsxv_fw_autodraft_setting.py0000666000175100017510000001064213244523345024556 0ustar zuulzuul00000000000000#!/usr/bin/env python # Copyright 2016 VMware Inc # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Purpose: Configure distributed firewall autodraft setting Usage: python nsxv_fw_autodraft_setting.py --vsm-ip --username --password [--autodraft-disable] [--autodraft-enable] """ import base64 import optparse from oslo_serialization import jsonutils import requests requests.packages.urllib3.disable_warnings() GLOBAL_CONFIG_URI = 'api/4.0/firewall/config/globalconfiguration' AUTO_DRAFT_DISABLED = 'autoDraftDisabled' class NSXClient(object): def __init__(self, host, username, password, *args, **kwargs): self._host = host self._username = username self._password = password def _get_headers(self, format): auth_cred = self._username + ":" + self._password auth = base64.b64encode(auth_cred) headers = {} headers['Authorization'] = "Basic %s" % auth headers['Content-Type'] = "application/%s" % format headers['Accept'] = "application/%s" % format return headers def _get_url(self, uri): return 'https://%s/%s' % (self._host, uri) def _get(self, format, uri): headers = self._get_headers(format) url = self._get_url(uri) response = requests.get(url, headers=headers, verify=False) return response def _put(self, format, uri, data): headers = self._get_headers(format) url = self._get_url(uri) response = requests.put(url, headers=headers, verify=False, data=data) return response def disable_autodraft(self): self._set_autodraft(True) def enable_autodraft(self): self._set_autodraft(False) def _get_global_config(self): resp = self._get('json', GLOBAL_CONFIG_URI) global_conf = jsonutils.loads(resp.text) return global_conf def _set_autodraft(self, disabled): global_conf = self._get_global_config() global_conf[AUTO_DRAFT_DISABLED] = disabled self._put('json', GLOBAL_CONFIG_URI, jsonutils.dumps(global_conf)) if __name__ == "__main__": parser = optparse.OptionParser() parser.add_option("--vsm-ip", dest="vsm_ip", help="NSX Manager IP address") parser.add_option("-u", "--username", default="admin", dest="username", help="NSX Manager username") parser.add_option("-p", "--password", 
default="default", dest="password", help="NSX Manager password") parser.add_option("--disable-autodraft", action="store_true", default=False, dest="disabled", help="Disable the autodraft setting for NSX " "distributed firewal.") parser.add_option("--enable-autodraft", action="store_true", default=False, dest="enabled", help="Enable the autodraft setting for NSX " "distributed firewal.") (options, args) = parser.parse_args() print("vsm-ip: %s" % options.vsm_ip) print("username: %s" % options.username) print("password: %s" % options.password) if options.disabled and options.enabled: print("Please provide only one of the options: --disable-autodraft, " "--enable-autodraft.") nsx_client = NSXClient(options.vsm_ip, options.username, options.password) if options.disabled: print("Disabling autodraft settings:") nsx_client.disable_autodraft() print("Autodraft is now disabled.") if options.enabled: print("Enabling autodraft settings:") nsx_client.enable_autodraft() print("Autodraft is now enabled.") vmware-nsx-12.0.1/devstack/tools/nsxv3_cleanup.py0000777000175100017510000004570113244523345022055 0ustar zuulzuul00000000000000# Copyright 2015 VMware Inc # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import optparse import sqlalchemy as sa from vmware_nsx.db import nsx_models from vmware_nsxlib import v3 from vmware_nsxlib.v3 import config from vmware_nsxlib.v3 import nsx_constants class NeutronNsxDB(object): def __init__(self, db_connection): super(NeutronNsxDB, self).__init__() engine = sa.create_engine(db_connection) self.session = sa.orm.session.sessionmaker()(bind=engine) def query_all(self, column, model): return list(set([r[column] for r in self.session.query(model).all()])) def get_logical_ports(self): return self.query_all('nsx_port_id', nsx_models.NeutronNsxPortMapping) def get_nsgroups(self): return self.query_all('nsx_id', nsx_models.NeutronNsxSecurityGroupMapping) def get_firewall_sections(self): return self.query_all('nsx_id', nsx_models.NeutronNsxFirewallSectionMapping) def get_logical_routers(self): return self.query_all('nsx_id', nsx_models.NeutronNsxRouterMapping) def get_logical_switches(self): return self.query_all('nsx_id', nsx_models.NeutronNsxNetworkMapping) def get_logical_dhcp_servers(self): return self.query_all('nsx_service_id', nsx_models.NeutronNsxServiceBinding) def get_vpn_sessions(self): return self.query_all('session_id', nsx_models.NsxVpnConnectionMapping) class NSXClient(object): """Base NSX REST client""" API_VERSION = "v1" NULL_CURSOR_PREFIX = '0000' def __init__(self, host, username, password, db_connection): self.host = host self.username = username self.password = password self.neutron_db = (NeutronNsxDB(db_connection) if db_connection else None) nsxlib_config = config.NsxLibConfig( username=self.username, password=self.password, nsx_api_managers=[self.host], # allow admin user to delete entities created # under openstack principal identity allow_overwrite_header=True) self.nsxlib = v3.NsxLib(nsxlib_config) def get_transport_zones(self): """ Retrieve all transport zones """ return self.nsxlib.transport_zone.list()['results'] def get_logical_ports(self): """ Retrieve all logical ports on NSX backend """ return 
self.nsxlib.logical_port.list()['results'] def get_os_logical_ports(self): """ Retrieve all logical ports created from OpenStack """ lports = self.get_os_resources( self.get_logical_ports()) if self.neutron_db: db_lports = self.neutron_db.get_logical_ports() lports = [lp for lp in lports if lp['id'] in db_lports] return lports def update_logical_port_attachment(self, lports): """ In order to delete logical ports, we need to detach the VIF attachment on the ports first. """ for p in lports: try: self.nsxlib.logical_port.update( p['id'], None, attachment_type=None) except Exception as e: print("ERROR: Failed to update lport %s: %s" % p['id'], e) def _remove_port_from_exclude_list(self, p): try: self.nsxlib.firewall_section.remove_member_from_fw_exclude_list( p['id'], None) except Exception: pass def _cleanup_logical_ports(self, lports): # logical port vif detachment self.update_logical_port_attachment(lports) for p in lports: # delete this port from the exclude list (if in it) self._remove_port_from_exclude_list(p) try: self.nsxlib.logical_port.delete(p['id']) except Exception as e: print("ERROR: Failed to delete logical port %s, error %s" % (p['id'], e)) else: print("Successfully deleted logical port %s" % p['id']) def cleanup_os_logical_ports(self): """ Delete all logical ports created by OpenStack """ os_lports = self.get_os_logical_ports() print("Number of OS Logical Ports to be deleted: %s" % len(os_lports)) self._cleanup_logical_ports(os_lports) def get_os_resources(self, resources): """ Get all logical resources created by OpenStack """ os_resources = [r for r in resources if 'tags' in r for tag in r['tags'] if 'os-api-version' in tag.values()] return os_resources def get_logical_switches(self): """ Retrieve all logical switches on NSX backend """ return self.nsxlib.logical_switch.list()['results'] def get_os_logical_switches(self): """ Retrieve all logical switches created from OpenStack """ lswitches = self.get_os_resources( self.get_logical_switches()) if 
self.neutron_db: db_lswitches = self.neutron_db.get_logical_switches() lswitches = [ls for ls in lswitches if ls['id'] in db_lswitches] return lswitches def get_lswitch_ports(self, ls_id): """ Return all the logical ports that belong to this lswitch """ lports = self.get_logical_ports() return [p for p in lports if p['logical_switch_id'] == ls_id] def cleanup_os_logical_switches(self): """ Delete all logical switches created from OpenStack """ lswitches = self.get_os_logical_switches() print("Number of OS Logical Switches to be deleted: %s" % len(lswitches)) for ls in lswitches: # Check if there are still ports on switch and blow them away # An example here is a metadata proxy port (this is not stored # in the DB so we are unable to delete it when reading ports # from the DB) lports = self.get_lswitch_ports(ls['id']) if lports: print("Number of orphan OS Logical Ports to be " "deleted: %s" % len(lports)) self._cleanup_logical_ports(lports) try: self.nsxlib.logical_switch.delete(ls['id']) except Exception as e: print("ERROR: Failed to delete logical switch %s-%s, " "error %s" % (ls['display_name'], ls['id'], e)) else: print("Successfully deleted logical switch %s-%s" % (ls['display_name'], ls['id'])) def get_firewall_sections(self): """ Retrieve all firewall sections """ return self.nsxlib.firewall_section.list() def get_os_firewall_sections(self): """ Retrieve all firewall sections created from OpenStack """ fw_sections = self.get_os_resources( self.get_firewall_sections()) if self.neutron_db: db_sections = self.neutron_db.get_firewall_sections() fw_sections = [fws for fws in fw_sections if fws['id'] in db_sections] return fw_sections def cleanup_os_firewall_sections(self): """ Cleanup all firewall sections created from OpenStack """ fw_sections = self.get_os_firewall_sections() print("Number of OS Firewall Sections to be deleted: %s" % len(fw_sections)) for fw in fw_sections: try: self.nsxlib.firewall_section.delete(fw['id']) except Exception as e: print("Failed 
to delete firewall section %s: %s" % (fw['display_name'], e)) else: print("Successfully deleted firewall section %s" % fw['display_name']) def get_ns_groups(self): """ Retrieve all NSGroups on NSX backend """ backend_groups = self.nsxlib.ns_group.list() ns_groups = self.get_os_resources(backend_groups) if self.neutron_db: db_nsgroups = self.neutron_db.get_nsgroups() ns_groups = [nsg for nsg in ns_groups if nsg['id'] in db_nsgroups] return ns_groups def cleanup_os_ns_groups(self): """ Cleanup all NSGroups created from OpenStack plugin """ ns_groups = self.get_ns_groups() print("Number of OS NSGroups to be deleted: %s" % len(ns_groups)) for nsg in ns_groups: try: self.nsxlib.ns_group.delete(nsg['id']) except Exception as e: print("Failed to delete NSGroup: %s: %s" % (nsg['display_name'], e)) else: print("Successfully deleted NSGroup: %s" % nsg['display_name']) def get_switching_profiles(self): """ Retrieve all Switching Profiles on NSX backend """ return self.nsxlib.switching_profile.list()['results'] def get_os_switching_profiles(self): """ Retrieve all Switching Profiles created from OpenStack """ sw_profiles = self.get_os_resources( self.get_switching_profiles()) if self.neutron_db: sw_profiles = [] return sw_profiles def cleanup_os_switching_profiles(self): """ Cleanup all Switching Profiles created from OpenStack plugin """ sw_profiles = self.get_os_switching_profiles() print("Number of OS SwitchingProfiles to be deleted: %s" % len(sw_profiles)) for swp in sw_profiles: try: self.nsxlib.switching_profile.delete(swp['id']) except Exception as e: print("Failed to delete Switching Profile: %s: %s" % (swp['display_name'], e)) else: print("Successfully deleted Switching Profile: %s" % swp['display_name']) def get_logical_routers(self, tier=None): """ Retrieve all the logical routers based on router type. If tier is None, it will return all logical routers. 
""" lrouters = self.nsxlib.logical_router.list( router_type=tier)['results'] if self.neutron_db: db_routers = self.neutron_db.get_logical_routers() lrouters = [lr for lr in lrouters if lr['id'] in db_routers] return lrouters def get_os_logical_routers(self): """ Retrieve all logical routers created from Neutron NSXv3 plugin """ lrouters = self.get_logical_routers() return self.get_os_resources(lrouters) def get_logical_router_ports(self, lrouter): """ Get all logical ports attached to lrouter """ return self.nsxlib.logical_router_port.get_by_router_id(lrouter['id']) def get_os_logical_router_ports(self, lrouter): """ Retrieve all logical router ports created from Neutron NSXv3 plugin """ lports = self.get_logical_router_ports(lrouter) return self.get_os_resources(lports) def cleanup_logical_router_ports(self, lrouter): """ Cleanup all logical ports on a logical router """ lports = self.get_os_logical_router_ports(lrouter) for lp in lports: try: self.nsxlib.logical_router_port.delete(lp['id']) except Exception as e: print("Failed to delete logical router port %s-%s, " "and response is %s" % (lp['display_name'], lp['id'], e)) else: print("Successfully deleted logical router port %s-%s" % (lp['display_name'], lp['id'])) def cleanup_os_logical_routers(self): """ Delete all logical routers created from OpenStack To delete a logical router, we need to delete all logical ports on the router first. 
""" lrouters = self.get_os_logical_routers() print("Number of OS Logical Routers to be deleted: %s" % len(lrouters)) for lr in lrouters: self.cleanup_logical_router_ports(lr) self.cleanup_logical_router_vpn_sess(lr) try: self.nsxlib.logical_router.delete(lr['id']) except Exception as e: print("ERROR: Failed to delete logical router %s-%s, " "error %s" % (lr['display_name'], lr['id'], e)) else: print("Successfully deleted logical router %s-%s" % (lr['display_name'], lr['id'])) def cleanup_os_tier0_logical_ports(self): """ Delete all TIER0 logical router ports created from OpenStack """ tier0_routers = self.get_logical_routers(tier='TIER0') for lr in tier0_routers: self.cleanup_logical_router_ports(lr) def get_logical_dhcp_servers(self): """ Retrieve all logical DHCP servers on NSX backend """ return self.nsxlib.dhcp_server.list()['results'] def get_os_logical_dhcp_servers(self): """ Retrieve all logical DHCP servers created from OpenStack """ dhcp_servers = self.get_os_resources( self.get_logical_dhcp_servers()) if self.neutron_db: db_dhcp_servers = self.neutron_db.get_logical_dhcp_servers() dhcp_servers = [srv for srv in dhcp_servers if srv['id'] in db_dhcp_servers] return dhcp_servers def cleanup_os_logical_dhcp_servers(self): """ Cleanup all logical DHCP servers created from OpenStack plugin """ dhcp_servers = self.get_os_logical_dhcp_servers() print("Number of OS Logical DHCP Servers to be deleted: %s" % len(dhcp_servers)) for server in dhcp_servers: try: self.nsxlib.dhcp_server.delete(server['id']) except Exception as e: print("ERROR: Failed to delete logical DHCP server %s, " "error %s" % (server['display_name'], e)) else: print("Successfully deleted logical DHCP server %s" % server['display_name']) def get_os_vpn_sessions(self): """ Retrieve all nsx vpn sessions from nsx and OpenStack """ sessions = self.get_os_resources( self.nsxlib.vpn_ipsec.session.list()['results']) if self.neutron_db: db_sessions = self.neutron_db.get_vpn_sessions() sessions = [sess for 
sess in sessions if sess['id'] in db_sessions] return sessions def cleanup_vpnaas_objects(self): """ Cleanup vpn/ipsec nsx objects """ if not self.nsxlib.feature_supported(nsx_constants.FEATURE_IPSEC_VPN): # no vpn support return # sessions: leftover sessions prevent us from configuring new similar # sessions so it is important to delete them sessions = self.get_os_vpn_sessions() for session in sessions: try: self.nsxlib.vpn_ipsec.session.delete(session['id']) except Exception as e: print("ERROR: Failed to delete vpn ipsec session %s, " "error %s" % (session['id'], e)) else: print("Successfully deleted vpn ipsec session %s" % session['id']) def cleanup_logical_router_vpn_sess(self, lr): """ Cleanup the vpn local session of the logical router """ if not self.nsxlib.feature_supported(nsx_constants.FEATURE_IPSEC_VPN): # no vpn support return # find the router neutron id in its tags neutron_id = None for tag in lr['tags']: if tag.get('scope') == 'os-neutron-router-id': neutron_id = tag.get('tag') break if not neutron_id: return tags = [{'scope': 'os-neutron-router-id', 'tag': neutron_id}] ep_list = self.nsxlib.search_by_tags( tags=tags, resource_type=self.nsxlib.vpn_ipsec.local_endpoint.resource_type) if ep_list['results']: id = ep_list['results'][0]['id'] try: self.nsxlib.vpn_ipsec.local_endpoint.delete(id) except Exception as e: print("ERROR: Failed to delete vpn ipsec local endpoint %s, " "error %s" % (id, e)) else: print("Successfully deleted vpn ipsec local endpoint %s" % id) def cleanup_all(self): """ Cleanup steps: 1. Cleanup firewall sections 2. Cleanup NSGroups 3. Cleanup logical router ports 4. Cleanup logical routers 5. Cleanup logical switch ports 6. Cleanup logical switches 7. 
Cleanup switching profiles """ self.cleanup_os_firewall_sections() self.cleanup_os_ns_groups() self.cleanup_vpnaas_objects() self.cleanup_os_logical_routers() self.cleanup_os_tier0_logical_ports() self.cleanup_os_logical_ports() self.cleanup_os_logical_switches() self.cleanup_os_logical_dhcp_servers() self.cleanup_os_switching_profiles() if __name__ == "__main__": parser = optparse.OptionParser() parser.add_option("--mgr-ip", dest="mgr_ip", help="NSX Manager IP address") parser.add_option("-u", "--username", default="admin", dest="username", help="NSX Manager username") parser.add_option("-p", "--password", default="default", dest="password", help="NSX Manager password") parser.add_option("--db-connection", default="", dest="db_connection", help=("When set, cleaning only backend resources that " "have db record.")) (options, args) = parser.parse_args() # Get NSX REST client nsx_client = NSXClient(options.mgr_ip, options.username, options.password, options.db_connection) # Clean all objects created by OpenStack nsx_client.cleanup_all() vmware-nsx-12.0.1/devstack/tools/nsxv_cleanup.py0000666000175100017510000004015613244523345021766 0ustar zuulzuul00000000000000#!/usr/bin/env python # Copyright 2015 VMware Inc # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Purpose: Sometimes NSXv backend are out of sync with OpenStack and all the objects created by OpenStack needs to be cleaned up. 
This is a util script to cleanup NSXv objects created by OpenStack List of objects to be cleared: - Edge (Service Edge, DHCP Edge, VDR Edge) - Logical Switches (Tenant Networks) - Firewall Rules (Security Group) Usage: python nsxv_cleanup.py --vsm-ip --username --password --force Note: force is optional. If it is specified, force delete security group You can also use it in python interactive console by import the module >>>> import nsxv_cleanup >>>> vsm = nsxv_cleanup.VSMClient('10.34.57.101', 'admin', 'default') Cleanup all logical switch >>>> vsm.cleanup_logical_switch() Cleanup all firewall section >>>> vsm.cleanup_firewall_section() Cleanup all security group >>>> vsm.cleanup_security_group() Cleanup all edges >>>> vsm.cleanup_edge() Cleanup all >>>> vsm.cleanup_all() If you have any comment or find a bug, please contact Tong Liu """ import base64 import optparse import sys from oslo_serialization import jsonutils import requests import sqlalchemy as sa from vmware_nsx.db import nsx_models from vmware_nsx.db import nsxv_models requests.packages.urllib3.disable_warnings() class NeutronNsxDB(object): def __init__(self, db_connection): super(NeutronNsxDB, self).__init__() engine = sa.create_engine(db_connection) self.session = sa.orm.session.sessionmaker()(bind=engine) def query_all(self, column, model): return list(set([r[column] for r in self.session.query(model).all()])) def query_all_firewall_sections(self): return self.query_all('ip_section_id', nsxv_models.NsxvSecurityGroupSectionMapping) def query_all_security_groups(self): return self.query_all('nsx_id', nsx_models.NeutronNsxSecurityGroupMapping) def query_all_logical_switches(self): return self.query_all('nsx_id', nsx_models.NeutronNsxNetworkMapping) def query_all_spoofguard_policies(self): return self.query_all('policy_id', nsxv_models.NsxvSpoofGuardPolicyNetworkMapping) def query_all_edges(self): return self.query_all('edge_id', nsxv_models.NsxvRouterBinding) class VSMClient(object): """Base VSM REST 
client """ API_VERSION = "2.0" def __init__(self, host, username, password, db_connection, force): self.force = force self.host = host self.username = username self.password = password self.version = None self.endpoint = None self.content_type = "application/json" self.accept_type = "application/json" self.verify = False self.secure = True self.interface = "json" self.url = None self.headers = None self.api_version = VSMClient.API_VERSION self.neutron_db = (NeutronNsxDB(db_connection) if db_connection else None) self.__set_headers() def __set_endpoint(self, endpoint): self.endpoint = endpoint def get_endpoint(self): return self.endpoint def __set_content_type(self, content_type): self.content_type = content_type def get_content_type(self): return self.content_type def __set_accept_type(self, accept_type): self.accept_type = accept_type def get_accept_type(self): return self.accept_type def __set_api_version(self, api_version): self.api_version = api_version def get_api_version(self): return self.api def __set_url(self, api=None, secure=None, host=None, endpoint=None): api = self.api_version if api is None else api secure = self.secure if secure is None else secure host = self.host if host is None else host endpoint = self.endpoint if endpoint is None else endpoint http_type = 'https' if secure else 'http' self.url = '%s://%s/api/%s%s' % (http_type, host, api, endpoint) def get_url(self): return self.url def __set_headers(self, content=None, accept=None): content_type = self.content_type if content is None else content accept_type = self.accept_type if accept is None else accept auth_cred = self.username + ":" + self.password auth = base64.b64encode(auth_cred) headers = {} headers['Authorization'] = "Basic %s" % auth headers['Content-Type'] = content_type headers['Accept'] = accept_type self.headers = headers def get(self, endpoint=None, params=None): """ Basic query method for json API request """ self.__set_url(endpoint=endpoint) response = requests.get(self.url, 
headers=self.headers, verify=self.verify, params=params) return response def delete(self, endpoint=None, params=None): """ Basic delete API method on endpoint """ self.__set_url(endpoint=endpoint) response = requests.delete(self.url, headers=self.headers, verify=self.verify, params=params) return response def post(self, endpoint=None, body=None): """ Basic post API method on endpoint """ self.__set_url(endpoint=endpoint) self.__set_headers() response = requests.post(self.url, headers=self.headers, verify=self.verify, data=jsonutils.dumps(body)) return response def get_vdn_scope_id(self): """ Retrieve existing network scope id """ self.__set_api_version('2.0') self.__set_endpoint("/vdn/scopes") response = self.get() if len(response.json()['allScopes']) == 0: return else: return response.json()['allScopes'][0]['objectId'] def query_all_logical_switches(self): lswitches = [] self.__set_api_version('2.0') vdn_scope_id = self.get_vdn_scope_id() if not vdn_scope_id: return lswitches endpoint = "/vdn/scopes/%s/virtualwires" % (vdn_scope_id) self.__set_endpoint(endpoint) # Query all logical switches response = self.get() paging_info = response.json()['dataPage']['pagingInfo'] page_size = int(paging_info['pageSize']) total_count = int(paging_info['totalCount']) print("There are total %s logical switches and page size is %s" % ( total_count, page_size)) pages = ceil(total_count, page_size) print("Total pages: %s" % pages) for i in range(0, pages): start_index = page_size * i params = {'startindex': start_index} response = self.get(params=params) temp_lswitches = response.json()['dataPage']['data'] lswitches += temp_lswitches if self.neutron_db: db_lswitches = self.neutron_db.query_all_logical_switches() lswitches = [ls for ls in lswitches if ls['objectId'] in db_lswitches] return lswitches def cleanup_logical_switch(self): print("Cleaning up logical switches on NSX manager") lswitches = self.query_all_logical_switches() print("There are total %s logical switches" % 
len(lswitches)) for ls in lswitches: print("\nDeleting logical switch %s (%s) ..." % (ls['name'], ls['objectId'])) endpoint = '/vdn/virtualwires/%s' % ls['objectId'] response = self.delete(endpoint=endpoint) if response.status_code != 200: print("ERROR: response status code %s" % response.status_code) def query_all_firewall_sections(self): firewall_sections = [] self.__set_api_version('4.0') self.__set_endpoint('/firewall/globalroot-0/config') # Query all firewall sections response = self.get() # Get layer3 sections related to security group if response.status_code is 200: l3_sections = response.json()['layer3Sections']['layer3Sections'] # do not delete the default section, or sections created by the # service composer firewall_sections = [s for s in l3_sections if (s['name'] != "Default Section Layer3" and "NSX Service Composer" not in s['name'])] else: print("ERROR: wrong response status code! Exiting...") sys.exit() if self.neutron_db: db_sections = self.neutron_db.query_all_firewall_sections() firewall_sections = [fws for fws in firewall_sections if fws['id'] in db_sections] return firewall_sections def cleanup_firewall_section(self): print("\n\nCleaning up firewall sections on NSX manager") l3_sections = self.query_all_firewall_sections() print("There are total %s firewall sections" % len(l3_sections)) for l3sec in l3_sections: print("\nDeleting firewall section %s (%s) ..." % (l3sec['name'], l3sec['id'])) endpoint = '/firewall/globalroot-0/config/layer3sections/%s' % \ l3sec['id'] response = self.delete(endpoint=endpoint) if response.status_code != 204: print("ERROR: response status code %s" % response.status_code) def query_all_security_groups(self): security_groups = [] self.__set_api_version('2.0') self.__set_endpoint("/services/securitygroup/scope/globalroot-0") # Query all security groups response = self.get() if response.status_code is 200: sg_all = response.json() else: print("ERROR: wrong response status code! 
Exiting...") sys.exit() # Remove Activity Monitoring Data Collection, which is not # related to any security group created by OpenStack security_groups = [sg for sg in sg_all if sg['name'] != "Activity Monitoring Data Collection"] if self.neutron_db: db_sgs = self.neutron_db.query_all_security_groups() security_groups = [sg for sg in security_groups if sg['objectId'] in db_sgs] return security_groups def cleanup_security_group(self): print("\n\nCleaning up security groups on NSX manager") security_groups = self.query_all_security_groups() print("There are total %s security groups" % len(security_groups)) for sg in security_groups: print("\nDeleting security group %s (%s) ..." % (sg['name'], sg['objectId'])) endpoint = '/services/securitygroup/%s' % sg['objectId'] params = {'force': self.force} response = self.delete(endpoint=endpoint, params=params) if response.status_code != 200: print("ERROR: response status code %s" % response.status_code) def query_all_spoofguard_policies(self): self.__set_api_version('4.0') self.__set_endpoint("/services/spoofguard/policies/") # Query all spoofguard policies response = self.get() if response.status_code is not 200: print("ERROR: Faield to get spoofguard policies") return sgp_all = response.json() policies = [sgp for sgp in sgp_all['policies'] if sgp['name'] != 'Default Policy'] if self.neutron_db: db_policies = self.neutron_db.query_all_spoofguard_policies() policies = [p for p in policies if p['policyId'] in db_policies] return policies def cleanup_spoofguard_policies(self): print("\n\nCleaning up spoofguard policies") policies = self.query_all_spoofguard_policies() print("There are total %s policies" % len(policies)) for spg in policies: print("\nDeleting spoofguard policy %s (%s) ..." 
% (spg['name'], spg['policyId'])) endpoint = '/services/spoofguard/policies/%s' % spg['policyId'] response = self.delete(endpoint=endpoint) print("Response code: %s" % response.status_code) def query_all_edges(self): edges = [] self.__set_api_version('4.0') self.__set_endpoint("/edges") # Query all edges response = self.get() paging_info = response.json()['edgePage']['pagingInfo'] page_size = int(paging_info['pageSize']) total_count = int(paging_info['totalCount']) print("There are total %s edges and page size is %s" % ( total_count, page_size)) pages = ceil(total_count, page_size) print("Total pages: %s" % pages) for i in range(0, pages): start_index = page_size * i params = {'startindex': start_index} response = self.get(params=params) temp_edges = response.json()['edgePage']['data'] edges += temp_edges if self.neutron_db: db_edges = self.neutron_db.query_all_edges() edges = [e for e in edges if e['id'] in db_edges] return edges def cleanup_edge(self): print("\n\nCleaning up edges on NSX manager") edges = self.query_all_edges() for edge in edges: print("\nDeleting edge %s (%s) ..." 
% (edge['name'], edge['id'])) endpoint = '/edges/%s' % edge['id'] response = self.delete(endpoint=endpoint) if response.status_code != 204: print("ERROR: response status code %s" % response.status_code) def cleanup_all(self): self.cleanup_firewall_section() self.cleanup_security_group() self.cleanup_spoofguard_policies() self.cleanup_edge() self.cleanup_logical_switch() def ceil(a, b): if b == 0: return 0 div = a / b mod = 0 if a % b is 0 else 1 return div + mod if __name__ == "__main__": parser = optparse.OptionParser() parser.add_option("--vsm-ip", dest="vsm_ip", help="NSX Manager IP address") parser.add_option("-u", "--username", default="admin", dest="username", help="NSX Manager username") parser.add_option("-p", "--password", default="default", dest="password", help="NSX Manager password") parser.add_option("--db-connection", dest="db_connection", default="", help=("When set, cleaning only backend resources that " "have db record.")) parser.add_option("-f", "--force", dest="force", action="store_true", help="Force cleanup option") (options, args) = parser.parse_args() print("vsm-ip: %s" % options.vsm_ip) print("username: %s" % options.username) print("password: %s" % options.password) print("db-connection: %s" % options.db_connection) print("force: %s" % options.force) # Get VSM REST client vsm_client = VSMClient(options.vsm_ip, options.username, options.password, options.db_connection, options.force) # Clean all objects created by OpenStack vsm_client.cleanup_all() vmware-nsx-12.0.1/devstack/nsx_v/0000775000175100017510000000000013244524600016667 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/devstack/nsx_v/devstackgaterc0000666000175100017510000000232713244523345021617 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file is sourced by the NSX-v3 CI to run selective set of tests # based on the features that are ready to be tested. # Begin list of exclusions. r="^(?!.*" r="$r(?:tempest\.api\.network\.test_ports\.PortsTestJSON\.test_create_update_port_with_second_ip.*)" r="$r|(?:tempest\.api\.network\.test_floating_ips\.FloatingIPTestJSON\.test_create_update_floatingip_with_port_multiple_ip_address.*)" r="$r|(?:tempest\.api\.network\.test_routers\.RoutersTest\.test_update_delete_extra_route.*)" # End list of exclusions. r="$r)" # only run tempest.api.network tests r="$r(tempest\.api\.network).*$" export DEVSTACK_GATE_TEMPEST_REGEX="$r" vmware-nsx-12.0.1/devstack/nsx_v/tvd_devstackgaterc0000666000175100017510000000375613244523345022503 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file is sourced by the NSX-TVD-V CI to run selective set of tests # based on the features that are ready to be tested. 
# Note that the TVD plugin enabled a lot of extensions that the NSX-V plugin does not support # so those tests should be skipped. # Begin list of exclusions. r="^(?!.*" # unsupported NSX-V tests r="$r(?:tempest\.api\.network\.test_ports\.PortsTestJSON\.test_create_update_port_with_second_ip.*)" r="$r|(?:tempest\.api\.network\.test_floating_ips\.FloatingIPTestJSON\.test_create_update_floatingip_with_port_multiple_ip_address.*)" r="$r|(?:tempest\.api\.network\.test_routers\.RoutersTest\.test_update_delete_extra_route.*)" # unsupported TVD tests r="$r|(?:tempest\.api\.network\.test_networks\.NetworksTest\.test_show_network_fields.*)" r="$r|(?:tempest\.api\.network\.test_extra_dhcp_options\.ExtraDHCPOptionsTestJSON\.test_update.*)" r="$r|(?:tempest\.api\.network\.test_extensions\.ExtensionsTestJSON\..*)" r="$r|(?:tempest\.api\.network\.test_allowed_address_pair\.AllowedAddressPairTestJSON\.test_update.*)" r="$r|(?:tempest\.api\.network\.admin\.test_routers_dvr\.RoutersTestDVR\..*)" r="$r|(?:tempest\.api\.network\.admin\.test_l3_agent_scheduler\.L3AgentSchedulerTestJSON\..*)" r="$r|(?:tempest\.api\.network\.admin.\test_metering_extensions.*)" # End list of exclusions. r="$r)" # only run tempest.api.network tests r="$r(tempest\.api\.network).*$" export DEVSTACK_GATE_TEMPEST_REGEX="$r" vmware-nsx-12.0.1/HACKING.rst0000666000175100017510000000261013244523345015532 0ustar zuulzuul00000000000000VMware-NSX Style Commandments ============================= - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/developer/hacking/ - Step 2: Read on VMware-NSX Specific Commandments -------------------------------- - [N319] Validate that debug level logs are not translated - [N320] Validate that LOG messages, except debug ones, have translations - [N321] Validate that jsonutils module is used instead of json - [N322] We do not use @authors tags in source files. We have git to track authorship. 
- [N323] Detect common errors with assert_called_once_with Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. All unittest classes must ultimately inherit from testtools.TestCase. In the Neutron test suite, this should be done by inheriting from neutron.tests.base.BaseTestCase. All setUp and tearDown methods must upcall using the super() method. tearDown methods should be avoided and addCleanup calls should be preferred. Never manually create tempfiles. Always use the tempfile fixtures from the fixture library to ensure that they are cleaned up. vmware-nsx-12.0.1/requirements.txt0000666000175100017510000000211213244523345017215 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
pbr!=2.1.0,>=2.0.0 # Apache-2.0 enum34>=1.0.4;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT httplib2>=0.9.1 # MIT netaddr>=0.7.18 # BSD tenacity>=3.2.1 # Apache-2.0 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT six>=1.10.0 # MIT stevedore>=1.20.0 # Apache-2.0 neutron-lib>=1.13.0 # Apache-2.0 osc-lib>=1.8.0 # Apache-2.0 python-openstackclient>=3.12.0 # Apache-2.0 oslo.concurrency>=3.25.0 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0 oslo.config>=5.1.0 # Apache-2.0 oslo.db>=4.27.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 oslo.vmware>=2.17.0 # Apache-2.0 PrettyTable<0.8,>=0.7.1 # BSD tooz>=1.58.0 # Apache-2.0 decorator>=3.4.0 # BSD vmware-nsx-12.0.1/TESTING.rst0000666000175100017510000001416413244523345015612 0ustar zuulzuul00000000000000Testing VMware-NSX ================== Overview -------- The unit tests (vmware_nsx/tests/unit/) are meant to cover as much code as possible and should be executed without the service running. They are designed to test the various pieces of the neutron and VMware NSX tree to make sure any new changes don't break existing functionality. Development process ------------------- It is expected that any new changes that are proposed for merge come with tests for that feature or code area. Ideally any bugs fixes that are submitted also have tests to prove that they stay fixed! In addition, before proposing for merge, all of the current tests should be passing. Virtual environments ~~~~~~~~~~~~~~~~~~~~ Testing OpenStack projects, including Neutron, is made easier with `DevStack `_. Create a machine (such as a VM or Vagrant box) running a distribution supported by DevStack and install DevStack there. 
For example, there is a Vagrant script for DevStack at https://github.com/bcwaldon/vagrant_devstack. .. note:: If you prefer not to use DevStack, you can still check out source code on your local machine and develop from there. Running unit tests ------------------ There are three mechanisms for running tests: run_tests.sh, tox, and nose. Before submitting a patch for review you should always ensure all test pass; a tox run is triggered by the jenkins gate executed on gerrit for each patch pushed for review. With these mechanisms you can either run the tests in the standard environment or create a virtual environment to run them in. By default after running all of the tests, any pep8 errors found in the tree will be reported. With `run_tests.sh` ~~~~~~~~~~~~~~~~~~~ You can use the `run_tests.sh` script in the root source directory to execute tests in a virtualenv:: ./run_tests -V With `nose` ~~~~~~~~~~~ You can use `nose`_ to run individual tests, as well as use for debugging portions of your code:: . .venv/bin/activate pip install nose nosetests There are disadvantages to running Nose - the tests are run sequentially, so race condition bugs will not be triggered, and the full test suite will take significantly longer than tox & testr. The upside is that testr has some rough edges when it comes to diagnosing errors and failures, and there is no easy way to set a breakpoint in the Neutron code, and enter an interactive debugging session while using testr. .. _nose: https://nose.readthedocs.org/en/latest/index.html With `tox` ~~~~~~~~~~ VMware NSX, like other OpenStack projects, uses `tox`_ for managing the virtual environments for running test cases. It uses `Testr`_ for managing the running of the test cases. Tox handles the creation of a series of `virtualenvs`_ that target specific versions of Python (2.7, 3.3, etc). Testr handles the parallel execution of series of test cases as well as the tracking of long-running tests and other things. 
Running unit tests is as easy as executing this in the root directory of the Neutron source code:: tox To run functional tests that do not require sudo privileges or specific-system dependencies:: tox -e functional To run all the functional tests in an environment that has been configured by devstack to support sudo and system-specific dependencies:: tox -e dsvm-functional For more information on the standard Tox-based test infrastructure used by OpenStack and how to do some common test/debugging procedures with Testr, see this wiki page: https://wiki.openstack.org/wiki/Testr .. _Testr: https://wiki.openstack.org/wiki/Testr .. _tox: http://tox.readthedocs.org/en/latest/ .. _virtualenvs: https://pypi.python.org/pypi/virtualenv Running individual tests ~~~~~~~~~~~~~~~~~~~~~~~~ For running individual test modules or cases, you just need to pass the dot-separated path to the module you want as an argument to it. For executing a specific test case, specify the name of the test case class separating it from the module path with a colon. For example, the following would run only the TestSubnetsV2 tests from vmware_nsx/tests/unit/nsx_v/test_plugin.py:: $ ./run_tests.sh vmware_nsx.tests.unit.nsx_v.test_plugin.TestSubnetsV2 or:: $ tox -e py27 vmware_nsx.tests.unit.nsx_v.test_plugin.TestSubnetsV2 Adding more tests ~~~~~~~~~~~~~~~~~ VMware NSX has a fast growing code base and there is plenty of areas that need to be covered by unit and functional tests. To get a grasp of the areas where tests are needed, you can check current coverage by running:: $ ./run_tests.sh -c Debugging --------- By default, calls to pdb.set_trace() will be ignored when tests are run. 
For pdb statements to work, invoke run_tests as follows:: $ ./run_tests.sh -d [test module path] It's possible to debug tests in a tox environment:: $ tox -e venv -- python -m testtools.run [test module path] Tox-created virtual environments (venv's) can also be activated after a tox run and reused for debugging:: $ tox -e venv $ . .tox/venv/bin/activate $ python -m testtools.run [test module path] Tox packages and installs the vmware-nsx source tree in a given venv on every invocation, but if modifications need to be made between invocation (e.g. adding more pdb statements), it is recommended that the source tree be installed in the venv in editable mode:: # run this only after activating the venv $ pip install --editable . Editable mode ensures that changes made to the source tree are automatically reflected in the venv, and that such changes are not overwritten during the next tox run. Post-mortem debugging ~~~~~~~~~~~~~~~~~~~~~ Setting OS_POST_MORTEM_DEBUGGER in the shell environment will ensure that the debugger .post_mortem() method will be invoked on test failure:: $ OS_POST_MORTEM_DEBUGGER=pdb ./run_tests.sh -d [test module path] Supported debuggers are pdb, and pudb. Pudb is full-screen, console-based visual debugger for Python which let you inspect variables, the stack, and breakpoints in a very visual way, keeping a high degree of compatibility with pdb:: $ ./.venv/bin/pip install pudb $ OS_POST_MORTEM_DEBUGGER=pudb ./run_tests.sh -d [test module path] vmware-nsx-12.0.1/.coveragerc0000666000175100017510000000015713244523345016061 0ustar zuulzuul00000000000000[run] branch = True source = neutron omit = neutron/tests/*,neutron/openstack/* [report] ignore_errors = True vmware-nsx-12.0.1/run_tests.sh0000777000175100017510000001746113244523345016333 0ustar zuulzuul00000000000000#!/usr/bin/env bash set -eu function usage { echo "Usage: $0 [OPTION]..." echo "Run Neutron's test suite(s)" echo "" echo " -V, --virtual-env Always use virtualenv. 
Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment" echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." echo " -n, --no-recreate-db Don't recreate the test database." echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." echo " -u, --update Update the virtual environment with any newer package versions" echo " -p, --pep8 Just run PEP8 and HACKING compliance check" echo " -8, --pep8-only-changed []" echo " Just run PEP8 and HACKING compliance check on files changed since HEAD~1 (or )" echo " -P, --no-pep8 Don't run static code checks" echo " -c, --coverage Generate coverage report" echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger." echo " -h, --help Print this usage message" echo " --virtual-env-path Location of the virtualenv directory" echo " Default: \$(pwd)" echo " --virtual-env-name Name of the virtualenv directory" echo " Default: .venv" echo " --tools-path Location of the tools directory" echo " Default: \$(pwd)" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," echo " If no virtualenv is found, the script will ask if you would like to create one. If you " echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." 
exit } function process_options { i=1 while [ $i -le $# ]; do case "${!i}" in -h|--help) usage;; -V|--virtual-env) always_venv=1; never_venv=0;; -N|--no-virtual-env) always_venv=0; never_venv=1;; -s|--no-site-packages) no_site_packages=1;; -r|--recreate-db) recreate_db=1;; -n|--no-recreate-db) recreate_db=0;; -f|--force) force=1;; -u|--update) update=1;; -p|--pep8) just_pep8=1;; -8|--pep8-only-changed) just_pep8_changed=1;; -P|--no-pep8) no_pep8=1;; -c|--coverage) coverage=1;; -d|--debug) debug=1;; --virtual-env-path) (( i++ )) venv_path=${!i} ;; --virtual-env-name) (( i++ )) venv_dir=${!i} ;; --tools-path) (( i++ )) tools_path=${!i} ;; -*) testopts="$testopts ${!i}";; *) testargs="$testargs ${!i}" esac (( i++ )) done } tool_path=${tools_path:-$(pwd)} venv_path=${venv_path:-$(pwd)} venv_dir=${venv_name:-.venv} with_venv=tools/with_venv.sh always_venv=0 never_venv=0 force=0 no_site_packages=0 installvenvopts= testargs= testopts= wrapper="" just_pep8=0 just_pep8_changed=0 no_pep8=0 coverage=0 debug=0 recreate_db=1 update=0 LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=C process_options $@ # Make our paths available to other scripts we call export venv_path export venv_dir export venv_name export tools_dir export venv=${venv_path}/${venv_dir} if [ $no_site_packages -eq 1 ]; then installvenvopts="--no-site-packages" fi function run_tests { # Cleanup *pyc ${wrapper} find . -type f -name "*.pyc" -delete if [ $debug -eq 1 ]; then if [ "$testopts" = "" ] && [ "$testargs" = "" ]; then # Default to running all tests if specific test is not # provided. testargs="discover ./vmware_nsx/tests" fi ${wrapper} python -m testtools.run $testopts $testargs # Short circuit because all of the testr and coverage stuff # below does not make sense when running testtools.run for # debugging purposes. return $? 
fi if [ $coverage -eq 1 ]; then TESTRTESTS="$TESTRTESTS --coverage" else TESTRTESTS="$TESTRTESTS --slowest" fi # Just run the test suites in current environment set +e testargs=`echo "$testargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testopts $testargs'" OS_TEST_PATH=`echo $testargs|grep -o 'vmware_nsx\neutron\.tests[^[:space:]:]\+'|tr . /` if [ -n "$OS_TEST_PATH" ]; then os_test_dir=$(dirname "$OS_TEST_PATH") else os_test_dir='' fi if [ -d "$OS_TEST_PATH" ]; then wrapper="OS_TEST_PATH=$OS_TEST_PATH $wrapper" elif [ -d "$os_test_dir" ]; then wrapper="OS_TEST_PATH=$os_test_dir $wrapper" fi echo "Running \`${wrapper} $TESTRTESTS\`" bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit2pyunit" RESULT=$? set -e copy_subunit_log if [ $coverage -eq 1 ]; then echo "Generating coverage report in covhtml/" # Don't compute coverage for common code, which is tested elsewhere ${wrapper} coverage combine ${wrapper} coverage html --include='neutron/*' --omit='neutron/openstack/common/*' -d covhtml -i fi return $RESULT } function copy_subunit_log { LOGNAME=`cat .testrepository/next-stream` LOGNAME=$(($LOGNAME - 1)) LOGNAME=".testrepository/${LOGNAME}" cp $LOGNAME subunit.log } function warn_on_flake8_without_venv { if [ $never_venv -eq 1 ]; then echo "**WARNING**:" echo "Running flake8 without virtual env may miss OpenStack HACKING detection" fi } function run_pep8 { echo "Running flake8 ..." warn_on_flake8_without_venv ${wrapper} flake8 } function run_pep8_changed { # NOTE(gilliard) We want use flake8 to check the entirety of every file that has # a change in it. Unfortunately the --filenames argument to flake8 only accepts # file *names* and there are no files named (eg) "nova/compute/manager.py". The # --diff argument behaves surprisingly as well, because although you feed it a # diff, it actually checks the file on disk anyway. 
local target=${testargs:-HEAD~1} local files=$(git diff --name-only $target | tr '\n' ' ') echo "Running flake8 on ${files}" warn_on_flake8_without_venv diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff } TESTRTESTS="python setup.py testr" if [ $never_venv -eq 0 ] then # Remove the virtual environment if --force used if [ $force -eq 1 ]; then echo "Cleaning virtualenv..." rm -rf ${venv} fi if [ $update -eq 1 ]; then echo "Updating virtualenv..." python tools/install_venv.py $installvenvopts fi if [ -e ${venv} ]; then wrapper="${with_venv}" else if [ $always_venv -eq 1 ]; then # Automatically install the virtualenv python tools/install_venv.py $installvenvopts wrapper="${with_venv}" else echo -e "No virtual environment found...create one? (Y/n) \c" read use_ve if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then # Install the virtualenv and run the test suite in it python tools/install_venv.py $installvenvopts wrapper=${with_venv} fi fi fi fi # Delete old coverage data from previous runs if [ $coverage -eq 1 ]; then ${wrapper} coverage erase fi if [ $just_pep8 -eq 1 ]; then run_pep8 exit fi if [ $just_pep8_changed -eq 1 ]; then run_pep8_changed exit fi if [ $recreate_db -eq 1 ]; then rm -f tests.sqlite fi run_tests # NOTE(sirp): we only want to run pep8 when we're running the full-test suite, # not when we're running tests individually. To handle this, we need to # distinguish between options (testopts), which begin with a '-', and # arguments (testargs). 
if [ -z "$testargs" ]; then if [ $no_pep8 -eq 0 ]; then run_pep8 fi fi vmware-nsx-12.0.1/AUTHORS0000664000175100017510000005233413244524575015020 0ustar zuulzuul00000000000000AKamyshnikova Aaron Rosen Aaron Rosen Aaron-Zhang231 Abhishek Chanda Abhishek Raut Abhishek Raut Abishek Subramanian Adam Gandelman Adam Harwell Adin Scannell Adit Sarfaty Ailing Zhang Akash Gangil Akash Gangil Akihiro MOTOKI Akihiro Motoki Aleks Chirko Alessandro Pilotti Alessandro Pilotti Alessio Ababilov Alessio Ababilov Alex Holden Alex Kang Alexander Ignatov Alexei Kornienko Alexey I. Froloff Amey Bhide Amir Sadoughi Andre Pech Andrea Frittoli Andreas Jaeger Andreas Jaeger Andrew Boik Andrey Epifanov Angus Lees Ann Kamyshnikova Anna Khmelnitsky Ante Karamatic Anthony Veiga Anton Frolov Arata Notsu Armando Migliaccio Armando Migliaccio Armando Migliaccio Arvind Somy Arvind Somya Assaf Muller Attila Fazekas Avishay Balderman AvnishPal Baodong (Robert) Li Baodong Li Baohua Yang Ben Lin Ben Nemec Ben Nemec Benedikt Trefzer Bernhard M. 
Wiedemann Bertrand Lallau Bhuvan Arumugam Bo Lin Bob Kukura Bob Melander Boden R Boris Pavlovic Brad Hall Brad Hall Bradley Jones Brandon Logan Brant Knudson Brian Haley Brian Waldon Britt Houser Cady_Chen Cao Xuan Hoang Carl Baldwin Carol Bouchard Cedric Brandily Chandan Kumar Chang Bo Guo Chengli XU Chirag Shahani Christian Berendt Christoph Arnold Christoph Thiel Christopher Chu Lin Chuck Chuck Carlino Chuck Short Clark Boylan Claudiu Belu Clint Byrum Cédric Ollivier Dan Florea Dan Prince Dan Wendlandt Dane LeBlanc Daniel Gollub Darragh O'Reilly Darragh O'Reilly Darren Birkett Davanum Srinivas Dave Cahill Dave Lapsley Dave Tucker David Ripton Dazhao Debo Deepak N Deepthi Kandavara Jayarama Derek Higgins Devang Doshi Dhanashree Gosavi Dirk Mueller Divya ChanneGowda Doug Hellmann Doug Wiegley DuYaHong Duarte Nunes Ed Bak Edgar Magana Edgar Magana Elena Ezhova Emilien Macchi EmilienM Eoghan Glynn Eric Brown Eric Windisch Erik Colnick Eugene Nikanorov Evgeny Fedoruk Fawad Khaliq Flavio Percoco Francisco Souza Franck Yelles Francois Deppierraz Francois Eleouet Gabriel Wainer Gary Kotton Gary Kotton Gauvain Pocentek Ghe Rivero Giridhar Jayavelu Gordon Chung Gordon Chung Guilherme Salgado Guoqiang Ding Haiwei Xu Han Zhou Hareesh Puthalath Harsh Prasad He Jie Xu He Yongli Hemanth Ravi Henry Gessau Henry Gessau Henry Gessau HenryGessau HenryVIII Hiroaki KAWAI Hirofumi Ichihara Hironori Shiina Hisaharu Ishii Hui HX Xiang Hui Xiang Ian Wienand Ignacio Scopetta Ihar Hrachyshka Ilya Pekelny Ilya Shakhat Ionuț Arțăriși Irena Berezovsky Iryoung Jeong Isaku Yamahata Isaku Yamahata Itsuro Oda Itzik Brown Ivan Kolodyazhny Ivar Lazzaro Ivar Lazzaro JJ Asghar JUN JIE NAN Jacek Swiderski Jakub Libosvar James E. Blair James E. Blair James Page Janet Yu Jason Dillaman Jason Kölker Jason Zhang Jaume Devesa Jay Pipes Jay S. 
Bryant Jeremy Hanmer Jeremy Stanley Jesse Andrews Jiajun Liu Jian Wen Jianing Yang Joe Gordon Joe Harrison Joe Heck Joe Mills John Davidge John Dewey John Dunning John Jason Brzozowski John Kasperski John Perkins John Schwarz Jon Grimm Jonathan LaCour Jordan Tardif Jorge Miramontes Juergen Brendel Julia Varlamova Juliano Martinez Juliano Martinez Julien Danjou Jun Park Justin Hammond Justin Lund KAWAI Hiroaki KIYOHIRO ADACHI Kaiwei Fan Kanzhe Jiang Ken'ichi Ohmichi Keshava Bharadwaj Kevin Benton Kevin Benton Kevin Benton Kevin L. Mitchell Kiall Mac Innes Kobi Samoray Koert van der Veer Koteswara Rao Kelam Koteswara Rao Kelam Kris Lindgren Kui Shi Kun Huang Kyle Mestery Kyle Mestery Lars Kellogg-Stedman Leon Cui Li Ma Li Ma Lianghwa Jou Liping Mao LipingMao Livnat Peer Lorin Hochstein Luis A. Garcia Luiz H Ozaki Luke Gorrie Ly Loi Madhav Puri Major Hayden Mandeep Dhami Manish Godara Marga Millet Mark McClain Mark McClain Mark McLoughlin Mark T. Voelker Martins Jakubovics Maru Newby Maru Newby Maruti Mate Lakat Matt Dietz Matt Odden Matt Riedemann Matthew Treinish Matthew Treinish Matthew Weeks Mehdi Abaakouk Michael J Fork Michael Smith Michael Still Michal Kelner Mishali Miguel Angel Ajo Miguel Lavalle Miguel Ángel Ajo Mike Bayer Mike Kolesnik Mithil Arun Mohammad Banikazemi Monty Taylor Morgan Fainberg Moshe Levi Motohiro OTSUKA Mukul Murali Birru Nachi Ueno Nachi Ueno Nader Lahouti Nick Bartos Nikolay Sobolevskiy Nishant Kumar Numan Siddique Oleg Bondarev Ondřej Nový Paul Michali Paul Ward Peng Xiao Peng Yong Peter Feiner Petrut Lucian Pierre Hanselmann Pierre RAMBAUD Pierre Rognant Piotr Siwczak Piotr Siwczak Pradeep Kilambi Praneet Bachheti Prashant Shetty Prasoon Telang Praveen Yalagandula Preeti Mirji Pritesh Kothari Przemyslaw Czesnowicz Puneet Arora QunyingRan Raghu Katti Rahul Priyadarshi Raildo Mascena Rajaram Mallya Rajeev Grover Rajesh Mohan Rajesh Mohan Rajiv Kumar Ralf Haferkamp Ray Chen Rich Curran Rick Clark Robert Collins Robert Collins Robert 
Kukura Robert Li Robert Mizielski Robert Pothier RobinWang Roey Chen Roey Chen Rohit Agarwalla Rohit Agarwalla Roman Bogorodskiy Roman Podoliaka Roman Podolyaka Roman Prykhodchenko Roman Sokolkov Romil Gupta RongzeZhu Rosario Di Somma Rossella Sblendido Rossella Sblendido Rudrajit Tapadar Rui Zang Russell Bryant Ryan Moats Ryan Moe Ryan O'Hara Ryan Petrello Ryota MIBU Ryu Ishimoto Sachi King Sahid Orentino Ferdjaoui Saksham Varma Salvatore Salvatore Orlando Salvatore Orlando Salvatore Orlando Sam Betts Sam Hague Samer Deeb Santhosh Santhosh Kumar Sascha Peilicke Sascha Peilicke Sascha Peilicke Saurabh Chordiya Sayaji Sean Dague Sean Dague Sean M. Collins Sean M. Collins Sean McCully Sean McGinnis Sean Mooney Senhua Huang Serge Maskalik Sergey Kolekonov Sergey Lukjanov Sergey Skripnick Sergey Vilgelm Sergey Vilgelm Sergio Cazzolato Shane Wang Shashank Hegde Shashank Hegde Shih-Hao Li Shiv Haris Shivakumar M Shuangtai Tian Shweta P Shweta P Shweta Patil Sidharth Surana Siming Yin Simon Pasquier Sitaram Dontu Soheil Hassas Yeganeh Somik Behera Somik Behera Sourabh Patwardhan Sphoorti Joglekar Sridar Kandaswamy Sridhar Ramaswamy Sridhar S Stanislav Kudriashev Stephen Gordon Stephen Gran Stephen Ma Steven Gonzales Steven Hillman Steven Ren Sudhakar Sudheendra Murthy Sudipta Biswas Sukhdev Sukhdev Sumit Naiksatam Sumit Naiksatam Sushil Kumar Swaminathan Vasudevan Swapnil Kulkarni (coolsvap) Sylvain Afchain Sławek Kapłoński Takaaki Suzuki Takuma Watanabe Tatyana Leontovich Terry Wilson Thierry Carrez Thomas Bechtold Tim Miller Tom Cammann Tom Fifield Tomasz Paszkowski Tomoe Sugihara Tomoko Inoue Tong Liu Trinath Somanchi Tyler Smith Vasiliy Khomenko Vijay Kankatala Vincent Untz Vishal Agarwal Vishal Agarwal Vishvananda Ishaya Vivekanandan Narasimhan Vu Cong Tuan Wei Wang Weidong Shao Wlodzimierz Borkowski Wu Wenxiang Xiaolin Zhang XieYingYun Xu Chen Xu Han Peng Xuhan Peng YAMAMOTO Takashi Yaguang Tang Yalei Wang Yang Yu Yang Yu YangLei Ying Liu Yong Sheng Gong Yong Sheng 
Gong Yoshihiro Kaneko Youcef Laribi YuYang Yuanchao Sun Yuriy Taraday Yusuke Muraoka Yves-Gwenael Bourhis ZHU ZHU Zang MingJie Zhenguo Niu Zhenmei Zhesen ZhiQiang Fan ZhiQiang Fan Zhongcheng Lao Zhongyue Luo Zuul aaronorosen aaronzhang231 abhishek.talwar alexpilotti armando-migliaccio armando-migliaccio asarfaty ashok2988 berlin cedric.brandily chen-li chnm-kulkarni dekehn e0ne eperdomo eperdomo@cisco.com <> fujioka yuuichi fumihiko kakuma garyduan garyk gecong1973 gengchc2 gessau ghanshyam ghanshyam gongysh gongysh gordon chung hyunsun ivan-zhu jasonrad jingliuqing joe@midokura.com johndavidge jun xie justin Lund kedar kulkarni lawrancejing linb liu-sheng liudong liuqing lizheming llg8212 luke.li lzklibj marios mark mcclain mat mathieu-rohon melissaml mouad benchchaoui ncode openstack rajeev rajeev ritesh.arya rohitagarwalla rohitagarwalla roagarwa@cisco.com <> ronak root root rossella rtmdk@163.com sadasu salvatore <> sanuptpm shaofeng_cheng shihanzhang shu,xinxin siyingchun skseeker snaiksat sridhargaddam stanzgy sukhdev sushma_korati sysnet trinaths venkatamahesh vikas vinkesh banka vishala vmware vmware_nsx_ci wangbo whitekid xchenum yangxurong yuyafei yuyangbj zhanghongtao zhangyanxian zhhuabj Édouard Thuleau vmware-nsx-12.0.1/doc/0000775000175100017510000000000013244524600014473 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/doc/source/0000775000175100017510000000000013244524600015773 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/doc/source/readme.rst0000666000175100017510000000003213244523345017764 0ustar zuulzuul00000000000000.. include:: ../README.rstvmware-nsx-12.0.1/doc/source/devstack.rst0000666000175100017510000002413713244523345020347 0ustar zuulzuul00000000000000NSX DevStack Configurations =========================== Below are the options for configuring the NSX plugin with DevStack. Prior to doing this DevStack needs to be downloaded. 
After updating the relevant configuration file(s) run ./stack.sh NSXv ---- LBaaS v2 Driver ~~~~~~~~~~~~~~~ Add lbaas repo as an external repository and configure following flags in ``local.conf``:: [[local]|[localrc]] enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas enable_service q-lbaasv2 Configure the service provider:: [[post-config|$NEUTRON_LBAAS_CONF]] [service_providers] service_provider = LOADBALANCERV2:VMWareEdge:neutron_lbaas.drivers.vmware.edge_driver_v2.EdgeLoadBalancerDriverV2:default QoS Driver ~~~~~~~~~~ Enable the qos in ``local.conf``:: [[local|localrc]] ENABLED_SERVICES=q-qos Q_SERVICE_PLUGIN_CLASSES=vmware_nsxv_qos NSXV_USE_DVS_FEATURES = True Optional: Update the nsx qos_peak_bw_multiplier in nsx.ini (default value is 2.0):: [NSX] qos_peak_bw_multiplier = L2GW Driver ~~~~~~~~~~~ Add networking-l2gw repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin networking-l2gw https://github.com/openstack/networking-l2gw ENABLED_SERVICES+=l2gw-plugin NETWORKING_L2GW_SERVICE_DRIVER=L2GW:vmware-nsx-l2gw:vmware_nsx.services.l2gateway.nsx_v.driver.NsxvL2GatewayDriver:default IPAM Driver ~~~~~~~~~~~ Update the ``local.conf`` file:: [[post-config|$NEUTRON_CONF]] [DEFAULT] ipam_driver = vmware_nsxv_ipam Flow Classifier ~~~~~~~~~~~~~~~ Update the ``local.conf`` file:: [[local|localrc]] enable_plugin networking-sfc https://git.openstack.org/openstack/networking-sfc master Q_SERVICE_PLUGIN_CLASSES=networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin [[post-config|$NEUTRON_CONF]] [flowclassifier] drivers = vmware-nsxv-sfc [nsxv] service_insertion_profile_id = In order to prevent tenants from changing the flow classifier, please add the following lines to the policy.json file:: "create_flow_classifier": "rule:admin_only", "update_flow_classifier": "rule:admin_only", "delete_flow_classifier": "rule:admin_only", "get_flow_classifier": "rule:admin_only" FWaaS (V1) 
Driver ~~~~~~~~~~~~~~~~~ Add neutron-fwaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-fwaas https://git.openstack.org/openstack/neutron-fwaas ENABLED_SERVICES+=,q-fwaas-v1 Q_SERVICE_PLUGIN_CLASSES=neutron_fwaas.services.firewall.fwaas_plugin.FirewallPlugin [[post-config|$NEUTRON_CONF]] [fwaas] enabled = True driver = vmware_nsxv_edge Neutron dynamic routing plugin (bgp) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add neutron-dynamic-routing repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-dynamic-routing https://git.openstack.org/openstack/neutron-dynamic-routing DR_MODE=dr_plugin BGP_PLUGIN=vmware_nsx.services.dynamic_routing.bgp_plugin.NSXvBgpPlugin [[post-config|$NEUTRON_CONF]] [DEFAULT] api_extensions_path = $DEST/neutron-dynamic-routing/neutron_dynamic_routing/extensions Neutron VPNaaS ~~~~~~~~~~~~~~ Add neutron-vpnaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-vpnaas https://git.openstack.org/openstack/neutron-vpnaas NEUTRON_VPNAAS_SERVICE_PROVIDER=VPN:vmware:vmware_nsx.services.vpnaas.nsxv.ipsec_driver.NSXvIPsecVpnDriver:default NSXv3 ----- QoS Driver ~~~~~~~~~~ Enable the qos in ``local.conf``:: [[local|localrc]] ENABLED_SERVICES+=,q-qos Q_SERVICE_PLUGIN_CLASSES=neutron.services.qos.qos_plugin.QoSPlugin Optional: Update the nsx qos_peak_bw_multiplier in nsx.ini (default value is 2.0):: [NSX] qos_peak_bw_multiplier = L2GW Driver ~~~~~~~~~~~ Add networking-l2gw repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin networking-l2gw https://github.com/openstack/networking-l2gw ENABLED_SERVICES+=l2gw-plugin NETWORKING_L2GW_SERVICE_DRIVER=L2GW:vmware-nsx-l2gw:vmware_nsx.services.l2gateway.nsx_v3.driver.NsxV3Driver:default DEFAULT_BRIDGE_CLUSTER_UUID= IPAM Driver ~~~~~~~~~~~ Update the 
``local.conf`` file:: [[post-config|$NEUTRON_CONF]] [DEFAULT] ipam_driver = vmware_nsxv3_ipam Trunk Driver ~~~~~~~~~~~~ Enable trunk service and configure following flags in ``local.conf``:: [[local]|[localrc]] # Trunk plugin NSXv3 driver config ENABLED_SERVICES+=,q-trunk Q_SERVICE_PLUGIN_CLASSES=trunk FWaaS (V1) Driver: ~~~~~~~~~~~~~ Add neutron-fwaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-fwaas https://git.openstack.org/openstack/neutron-fwaas ENABLED_SERVICES+=,q-fwaas Q_SERVICE_PLUGIN_CLASSES=neutron_fwaas.services.firewall.fwaas_plugin.FirewallPlugin [[post-config|$NEUTRON_CONF]] [fwaas] enabled = True driver = vmware_nsxv3_edge_v1 FWaaS (V2) Driver ~~~~~~~~~~~~~~~~~ Add neutron-fwaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-fwaas https://git.openstack.org/openstack/neutron-fwaas ENABLED_SERVICES+=,q-fwaas-v2 Q_SERVICE_PLUGIN_CLASSES=neutron_fwaas.services.firewall.fwaas_plugin_v2.FirewallPluginV2 [[post-config|$NEUTRON_CONF]] [fwaas] enabled = True driver = vmware_nsxv3_edge_v2 LBaaS v2 Driver ~~~~~~~~~~~~~~~ Add lbaas repo as an external repository and configure following flags in ``local.conf``:: [[local]|[localrc]] enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas enable_service q-lbaasv2 Configure the service provider:: [[post-config|$NEUTRON_LBAAS_CONF]] [service_providers] service_provider = LOADBALANCERV2:VMWareEdge:neutron_lbaas.drivers.vmware.edge_driver_v2.EdgeLoadBalancerDriverV2:default Neutron VPNaaS ~~~~~~~~~~~~~~ Add neutron-vpnaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-vpnaas https://git.openstack.org/openstack/neutron-vpnaas NEUTRON_VPNAAS_SERVICE_PROVIDER=VPN:vmware:vmware_nsx.services.vpnaas.nsxv3.ipsec_driver.NSXv3IPsecVpnDriver:default NSX-TVD ------- LBaaS v2 Driver 
~~~~~~~~~~~~~~~ Add lbaas repo as an external repository and configure following flags in ``local.conf``:: [[local]|[localrc]] enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas enable_service q-lbaasv2 Q_SERVICE_PLUGIN_CLASSES=vmware_nsxtvd_lbaasv2 Configure the service provider:: [[post-config|$NEUTRON_LBAAS_CONF]] [service_providers] service_provider = LOADBALANCERV2:VMWareEdge:neutron_lbaas.drivers.vmware.edge_driver_v2.EdgeLoadBalancerDriverV2:default [[post-config|$NEUTRON_CONF]] [DEFAULT] api_extensions_path = $DEST/neutron-lbaas/neutron_lbaas/extensions FWaaS (V1) Driver: ~~~~~~~~~~~~~ Add neutron-fwaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-fwaas https://git.openstack.org/openstack/neutron-fwaas ENABLED_SERVICES+=,q-fwaas Q_SERVICE_PLUGIN_CLASSES=vmware_nsxtvd_fwaasv1 [[post-config|$NEUTRON_CONF]] [fwaas] enabled = True driver = vmware_nsxtvd_edge_v1 [DEFAULT] api_extensions_path = $DEST/neutron-fwaas/neutron_fwaas/extensions FWaaS (V2) Driver ~~~~~~~~~~~~~~~~~ Add neutron-fwaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-fwaas https://git.openstack.org/openstack/neutron-fwaas ENABLED_SERVICES+=,q-fwaas-v2 Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsxtvd_fwaasv2 [[post-config|$NEUTRON_CONF]] [fwaas] enabled = True driver = vmware_nsxtvd_edge_v2 [DEFAULT] api_extensions_path = $DEST/neutron-fwaas/neutron_fwaas/extensions L2GW Driver ~~~~~~~~~~~ Add networking-l2gw repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin networking-l2gw https://github.com/openstack/networking-l2gw ENABLED_SERVICES+=l2gw-plugin NETWORKING_L2GW_SERVICE_DRIVER=L2GW:vmware-nsx-l2gw:vmware_nsx.services.l2gateway.nsx_tvd.driver.NsxTvdL2GatewayDriver:default DEFAULT_BRIDGE_CLUSTER_UUID= Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsxtvd_l2gw 
[[post-config|$NEUTRON_CONF]] [DEFAULT] api_extensions_path = $DEST/networking-l2gateway/networking_l2gw/extensions QoS Driver ~~~~~~~~~~ Enable the qos in ``local.conf``:: [[local|localrc]] ENABLED_SERVICES+=,q-qos Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsxtvd_qos Neutron dynamic routing plugin (bgp) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add neutron-dynamic-routing repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-dynamic-routing https://git.openstack.org/openstack/neutron-dynamic-routing DR_MODE=dr_plugin BGP_PLUGIN=vmware_nsx.services.dynamic_routing.bgp_plugin.NSXBgpPlugin [[post-config|$NEUTRON_CONF]] [DEFAULT] api_extensions_path = $DEST/neutron-dynamic-routing/neutron_dynamic_routing/extensions Neutron VPNaaS ~~~~~~~~~~~~~~ Add neutron-vpnaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-vpnaas https://git.openstack.org/openstack/neutron-vpnaas NEUTRON_VPNAAS_SERVICE_PROVIDER=VPN:vmware:vmware_nsx.services.vpnaas.nsx_tvd.ipsec_driver.NSXIPsecVpnDriver:default Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsxtvd_vpnaas [[post-config|$NEUTRON_CONF]] [DEFAULT] api_extensions_path = $DEST/neutron-vpnaas/neutron_vpnaas/extensions IPAM Driver ~~~~~~~~~~~ Update the ``local.conf`` file:: [[post-config|$NEUTRON_CONF]] [DEFAULT] ipam_driver = vmware_nsxtvd_ipam vmware-nsx-12.0.1/doc/source/usage.rst0000666000175100017510000000010413244523345017633 0ustar zuulzuul00000000000000======== Usage ======== To use in a project:: import vmware_nsx vmware-nsx-12.0.1/doc/source/contributing.rst0000666000175100017510000000004313244523345021240 0ustar zuulzuul00000000000000.. include:: ../../CONTRIBUTING.rstvmware-nsx-12.0.1/doc/source/admin_util.rst0000666000175100017510000004110013244523413020651 0ustar zuulzuul00000000000000Admin Utility ============= The NSXv and the NSXv3 support the nsxadmin utility. 
This enables and administrator to determine and rectify inconsistencies between the Neutron DB and the NSX. usage: nsxadmin -r -o NSXv ---- The following resources are supported: 'security-groups', 'edges', 'networks', 'firewall-sections', 'orphaned-edges', 'spoofguard-policy', 'missing-edges', 'backup-edges', 'nsx-security-groups', 'dhcp-binding' and 'metadata' Edges ~~~~~ - List backend NSX edges with their id, name and some more information:: nsxadmin -r edges -o nsx-list - List backend NSX edges with more details:: nsxadmin -r edges -o nsx-list --verbose - Neutron list:: nsxadmin -r edges -o neutron-list - Update Resource pool / Datastore on all edges in the backend. This utility can update resource pool and datastore ID of all edges to the nsx.ini configuration:: nsxadmin -r edges -o nsx-update-all --property appliances=True - Update Resource pool / Datastore / edge HA of an edge: This utility can be used on upgrade after the customer added ha_datastore_id to the nsx.ini configuration or after changing the resource pool / data store globally or per availability zone. 
This Utility can update the deployment of existing edges:: nsxadmin -r edges -o nsx-update --property edge-id= --property appliances=True - Update the size of an edge:: nsxadmin -r edges -o nsx-update --property edge-id=edge-55 --property size=compact - Update the high availability of an edge: enable/disable high availability of an edge:: nsxadmin -r edges -o nsx-update --property edge-id=edge-55 --property highavailability= - Update syslog config on edge (syslog-proto and syslog-server2 are optional):: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property syslog-server= --property syslog-server2= --property syslog-proto= - Delete syslog config on edge:: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property syslog-server=none - Enable logging with specified log level for specific module (routing, dns, dhcp, highavailability, loadbalancer) on edge:: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property routing-log-level=debug - Enable logging with specified log level for all supported modules on edge:: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property log-level=debug - Disable logging on edge:: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property log-level=none - Update reservations of an edge:: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property resource= --property limit= --property reservation= --property shares= - Update DRS hostgroups for an edge:: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property hostgroup=update|all - Update DRS hostgroups for all edges:: nsxadmin -o nsx-update -r edges --property hostgroup=all - Clean all DRS hostgroups for all edges:: nsxadmin -o nsx-update -r edges --property hostgroup=clean Orphaned Edges ~~~~~~~~~~~~~~ - List orphaned edges (exist on NSXv backend but don't have a corresponding binding in Neutron DB):: nsxadmin -r orphaned-edges -o list - Clean orphaned edges (delete edges from NSXv backend):: nsxadmin -r orphaned-edges -o clean Orphaned Router bindings 
~~~~~~~~~~~~~~~~~~~~~~~~ - List orphaned router bindings entries (exist on the router bindings DB table, but the neutron object behind them (router, network, or loadbalancer) is missing):: nsxadmin -r orphaned-bindings -o list - Clean orphaned router bindings entries (delete DB entry):: nsxadmin -r orphaned-bindings -o clean Orphaned Router VNICs ~~~~~~~~~~~~~~~~~~~~~ - List orphaned router vnic entries (exist on the edge vnics bindings DB table, but the neutron interface port behind them is missing):: nsxadmin -r orphaned-vnics -o list - Clean orphaned router vnics (delete DB entry, and NSX router interface):: nsxadmin -r orphaned-vnics -o clean Missing Edges ~~~~~~~~~~~~~ - List missing edges on NSX. This includes missing networks on those edges:: nsxadmin -r missing-edges -o list Backup Edges ~~~~~~~~~~~~ - List backend backup edges with their id, name and some more information:: nsxadmin -r backup-edges -o list - Delete backup edge:: nsxadmin -r backup-edges -o clean --property edge-id=edge-9 - Delete all backup edges:: nsxadmin -r backup-edges -o clean-all - List Edge name mismatches between DB and backend, and backup edges that are missing from the backend:: nsxadmin -r backup-edges -o list-mismatches - Fix Edge name mismatch between DB and backend by updating the name on the backend:: nsxadmin -r backup-edges -o fix-mismatch --property edge-id=edge-9 - Delete a backup edge from the DB and NSX by it's router ID:: nsxadmin -r backup-edges -o neutron-clean --property router-id=backup-26ab1a3a-d73d DHCP Bindings ~~~~~~~~~~~~~ - List missing DHCP bindings: list dhcp edges that are missing from the NSXv backend:: nsxadmin -r dhcp-binding -o list - Update DHCP bindings on an edge:: nsxadmin -r dhcp-binding -o nsx-update --property edge-id=edge-15 - Recreate DHCP edge by moving all the networks to other edges:: nsxadmin -r dhcp-binding -o nsx-recreate --property edge-id=edge-222 - Recreate DHCP edge for a specific network (when the edge does not exist):: nsxadmin -r 
dhcp-binding -o nsx-recreate --property net-id=5253ae45-75b4-4489-8aa1-6a9e1cfa80a6 - Redistribute networks on dhcp edges (for example when configuration of share_edges_between_tenants changes):: nsxadmin -r dhcp-binding -o nsx-redistribute Routers ~~~~~~~ - Recreate a router edge by moving the router/s to other edge/s:: nsxadmin -r routers -o nsx-recreate --property edge-id=edge-308 - Recreate a router on the nsx backend by removing it from the current edge (if any), and attaching to a new one:: nsxadmin -r routers -o nsx-recreate --property router-id=8cdd6d06-b457-4cbb-a0b1-41e08ccce287 - Redistribute shared routers on edges (for example when configuration of share_edges_between_tenants changes):: nsxadmin -r routers -o nsx-redistribute - Migrate NSXv metadata infrastructure for VDRs - use regular DHCP edges for VDR:: nsxadmin -r routers -o migrate-vdr-dhcp Networks ~~~~~~~~ - Ability to update or get the teaming policy for a DVS:: nsxadmin -r networks -o nsx-update --property dvs-id= --property teamingpolicy= - List backend networks and their network morefs:: nsxadmin -r networks -o list Missing Networks ~~~~~~~~~~~~~~~~ - List networks which are missing from the backend:: nsxadmin -r missing-networks -o list Orphaned Networks ~~~~~~~~~~~~~~~~~ - List networks which are missing from the neutron DB:: nsxadmin -r orphaned-networks -o list - Delete a backend network by it's moref:: nsxadmin -r orphaned-networks -o nsx-clean --property moref= Security Groups, Firewall and Spoofguard ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Security groups. 
This adds support to list security-groups mappings and miss-matches between the mappings and backend resources as: firewall-sections and nsx-security-groups:: nsxadmin --resource security-groups --operation list nsxadmin -r nsx-security-groups -o {list, list-missmatches} nsxadmin -r firewall-sections -o {list, list-missmatches, nsx-update} - Spoofguard support:: nsxadmin -r spoofguard-policy -o list-mismatches nsxadmin -r spoofguard-policy -o clean --property policy-id=spoofguardpolicy-10 nsxadmin -r spoofguard-policy -o list --property reverse (entries defined on NSXv and not in Neutron) - Migrate a security group from using rules to using a policy nsxadmin -r security-groups -o migrate-to-policy --property policy-id=policy-10 --property security-group-id=733f0741-fa2c-4b32-811c-b78e4dc8ec39 - Reorder the nsx L3 firewall sections to correctly support the policy security groups nsxadmin -r firewall-sections -o nsx-reorder - Update the default cluster section nsxadmin -r firewall-sections -o nsx-update Metadata ~~~~~~~~ - Update loadbalancer members on router and DHCP edges:: nsxadmin -r metadata -o nsx-update - Update shared secret on router and DHCP edges:: nsxadmin -r metadata -o nsx-update-secret - Retrieve metadata connectivity - optionally for a specific network:: nsxadmin -r metadata -o status [--property network_id=] Config ~~~~~~ - Validate the configuration in the nsx.ini and backend connectivity nsxadmin -r config -o validate NSXv3 ----- The following resources are supported: 'security-groups', 'routers', 'networks', 'nsx-security-groups', 'dhcp-binding', 'metadata-proxy', 'orphaned-dhcp-servers', 'firewall-sections', 'certificate', 'orphaned-networks', 'orphaned-routers', and 'ports'. 
Networks ~~~~~~~~ - List missing networks:: nsxadmin -r networks -o list-mismatches Orphaned Networks ~~~~~~~~~~~~~~~~~ - List networks (logical switches) which are missing from the neutron DB:: nsxadmin -r orphaned-networks -o list - Delete a backend network (logical switch) by it's nsx-id:: nsxadmin -r orphaned-networks -o nsx-clean --property nsx-id= Routers ~~~~~~~ - List missing routers:: nsxadmin -r routers -o list-mismatches - Update NAT rules on all routers to stop bypassing the FW rules. This is useful for NSX version 2.0 & up, before starting to use FWaaS nsxadmin -r routers -o nsx-update-rules - Update DHCP relay service on NSX router ports according to the current configuration:: nsxadmin -r routers -o nsx-update-dhcp-relay Orphaned Routers ~~~~~~~~~~~~~~~~~ - List logical routers which are missing from the neutron DB:: nsxadmin -r orphaned-routers -o list - Delete a backend logical router by it's nsx-id:: nsxadmin -r orphaned-routers -o nsx-clean --property nsx-id= Ports ~~~~~ - List missing ports, and ports that exist on backend but without the expected switch profiles:: nsxadmin -r ports -o list-mismatches - Update the VMs ports on the backend after migrating nsx-v -> nsx-v3:: nsxadmin -r ports -o nsx-migrate-v-v3 - Migrate exclude ports to use tags:: nsxadmin -r ports -o migrate-exclude-ports - Tag ports to be part of the default OS security group:: nsxadmin -r ports -o nsx-tag-default Security Groups & NSX Security Groups ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - List backed security groups:: nsxadmin -r nsx-security-groups -o list - List neutron security groups:: nsxadmin -r security-groups -o list - Fix mismatch sections in security group:: nsxadmin -r security-groups -o fix-mismatch - List nsx security groups with mismatch sections:: nsxadmin -r nsx-security-groups -o list-mismatches - Update NSX security groups dynamic criteria for NSXv3 CrossHairs:: nsxadmin -r nsx-security-groups -o migrate-to-dynamic-criteria Firewall Sections 
~~~~~~~~~~~~~~~~~ - List backed firewall sections:: nsxadmin -r firewall-sections -o list - List security groups with missing sections:: nsxadmin -r firewall-sections -o list-mismatches Metadata Proxy ~~~~~~~~~~~~~~ - List version 1.0.0 metadata networks in Neutron:: nsxadmin -r metadata-proxy -o list - Resync metadata proxies for NSXv3 version 1.1.0 and above (enable md proxy, or update the uuid). This is only for migrating to native metadata support:: nsxadmin -r metadata-proxy -o nsx-update --property metadata_proxy_uuid= - update the ip of the Nova server in the metadata proxy server on the nsx nsxadmin -r metadata-proxy -o nsx-update-ip --property server-ip= --property availability-zone= DHCP Bindings ~~~~~~~~~~~~~ - List DHCP bindings in Neutron:: nsxadmin -r dhcp-binding -o list - Resync DHCP bindings for NSXv3 version 1.1.0 and above. This is only for migrating to native DHCP support:: nsxadmin -r dhcp-binding -o nsx-update --property dhcp_profile_uuid= - Recreate dhcp server for a neutron network:: nsxadmin -r dhcp-binding -o nsx-recreate --property net-id= Orphaned DHCP Servers ~~~~~~~~~~~~~~~~~~~~~ - List orphaned DHCP servers (exist on NSXv3 backend but don't have a corresponding binding in Neutron DB):: nsxadmin -r orphaned-dhcp-servers -o nsx-list - Clean orphaned DHCP servers (delete logical DHCP servers from NSXv3 backend):: nsxadmin -r orphaned-dhcp-servers -o nsx-clean Client Certificate ~~~~~~~~~~~~~~~~~~ - Generate new client certificate (this command will delete previous certificate if exists):: nsxadmin -r certificate -o generate [--property username= --property password= --property key-size= --property sig-alg= --property valid-days= --property country= --property state= --property org= --property unit= --property host=] - Delete client certificate:: nsxadmin -r certificate -o clean - Show client certificate details:: nsxadmin -r certificate -o show - Import external certificate to NSX:: nsxadmin -r certificate -o import [--property username= 
--property password= --property filename=] - List certificates associated with openstack principal identity in NSX:: nsxadmin -r certificate -o nsx-list BGP GW edges ~~~~~~~~~~~~ - Create new BGP GW edge:: nsxadmin -r bgp-gw-edge -o create --property name= --property local-as= --property external-iface=: --property internal-iface=: - Delete BGP GW edge:: nsxadmin -r bgp-gw-edge -o delete --property gw-edge-id= - List BGP GW edges:: nsxadmin -r bgp-gw-edge -o list - Add a redistribution rule to a BGP GW edges:: nsxadmin -r routing-redistribution-rule -o create --property edge-ids=[,...] [--property prefix=] --property learner-protocol= --property learn-from=ospf,bgp,connected,static --property action= - Remove a redistribution rule from BGP GW edges:: nsxadmin -r routing-redistribution-rule -o delete --property gw-edge-ids=[,...] [--property prefix-name=] - Add a new BGP neighbour to BGP GW edges:: nsxadmin -r bgp-neighbour -o create --property gw-edge-ids=[,...] --property ip-address= --property remote-as= --property --password= - Remove BGP neighbour from BGP GW edges:: nsxadmin -r bgp-neighbour -o delete --property gw-edge-ids=[,...] 
--property ip-address= LBaaS ~~~~~ - List NSX LB services:: nsxadmin -r lb-services -o list - List NSX LB virtual servers:: nsxadmin -r lb-virtual-servers -o list - List NSX LB pools:: nsxadmin -r lb-pools -o list - List NSX LB monitors:: nsxadmin -r lb-monitors -o list Rate Limit ~~~~~~~~~~ - Show the current NSX rate limit: nsxadmin -r rate-limit -o show - Update the NSX rate limit: nsxadmin -r rate-limit -o nsx-update --property value=<> NSXtvd ------ - All the nsx-v/v3 utilities can be used by calling nsxadmin --plugin nsxv/v3 -r <> -o <> - Add mapping between projects and plugin before starting to use the tvd plugin: nsxadmin -r projects -o import --property plugin=nsx-v --property project=<> Config ~~~~~~ - Validate the configuration in the nsx.ini and backend connectivity nsxadmin -r config -o validate Upgrade Steps (Version 1.0.0 to Version 1.1.0) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1. Upgrade NSX backend from version 1.0.0 to version 1.1.0 2. Create a DHCP-Profile and a Metadata-Proxy in NSX backend 3. Stop Neutron 4. Install version 1.1.0 Neutron plugin 5. Run admin tools to migrate version 1.0.0 objects to version 1.1.0 objects nsxadmin -r metadata-proxy -o nsx-update --property metadata_proxy_uuid= nsxadmin -r dhcp-binding -o nsx-update --property dhcp_profile_uuid= 6. Start Neutron 7. Make sure /etc/nova/nova.conf has metadata_proxy_shared_secret = 8. 
Restart VMs or ifdown/ifup their network interface to get new DHCP options Steps to create a TVD admin user ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Do the following steps: source devstack/openrc admin admin openstack project create admin_v --domain=default --or-show -f value -c id openstack user create admin_v --password password --domain=default --email=alt_demo@example.com --or-show -f value -c id openstack role add admin --user --project Or run: devstack/tools/create_userrc.sh Then: openstack project plugin create --plugin nsx-v vmware-nsx-12.0.1/doc/source/housekeeper.rst0000666000175100017510000000445313244523345021061 0ustar zuulzuul00000000000000Plugin Housekeeper ================== During the Neutron plugin's operation, system may enter an inconsistent state due to synchronization issues between different components, e.g Neutron and NSX or NSX and vCenter. Some of these inconsistencies may impact the operation of various system elements. The Housekeeping mechanism should: a) Detect such inconsistencies and warn about them. b) Resolve inconsistencies when possible. Some of these inconsistencies can be resolved using the Admin utility, yet it requires manual operation by the administrator while the housekeeping mechanism should be automatic. Configuration ------------- Housekeeping mechanism uses two configuration parameters: nsxv.housekeeping_jobs: The housekeeper can be configured which tasks to execute and which should be skipped. nsxv.housekeeping_readonly: Housekeeper may attempt to fix a broken environment when this flag is set to False, or otherwise will just warn about inconsistencies. Operation --------- The housekeeping mechanism is an extension to the Neutron plugin. Therefore it can be triggered by accessing the extension's URL with an administrator context. 
A naive devstack example could be:: source devstack/openrc admin demo export AUTH_TOKEN=`openstack token issue | awk '/ id /{print $4}'` curl -X PUT -s -H "X-Auth-Token: $AUTH_TOKEN" -H 'Content-Type: application/json' -d '{"housekeeper": {}}' http://:9696/v2.0/housekeepers/all Where would be the Neutron controller's IP or the virtual IP of the load balancer which manages the Neutron controllers. It is important to use the virtual IP in case of a load balanced active-backup Neutron servers, as otherwise the housekeeping request may be handled by the wrong controller. To operate the housekeeper periodically as it should, it should be scheduled via a timing mechanism such as Linux cron. Plugin Jobs ----------- NSX-v ~~~~~ error_dhcp_edge: scans for DHCP Edge appliances which are in ERROR state. When in non-readonly mode, the job will attempt recovery of the DHCP edges by removing stale elements from the Neutron DB and reconfigure the interfaces at the backend when required. error_backup_edge: scans from backup Edge appliances which are in ERROR state. When in non-readonly mode, the job will reset the Edge appliance configuration. vmware-nsx-12.0.1/doc/source/conf.py0000666000175100017510000000577313244523345017315 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import sys import fileinput import fnmatch sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', #'sphinx.ext.intersphinx', 'oslosphinx' ] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # A list of glob-style patterns that should be excluded when looking for source # files. exclude_patterns = [ 'api/tests.*', # avoid of docs generation from tests 'api/oslo.vmware._*', # skip private modules ] # Prune the excluded patterns from the autoindex PATH = 'api/autoindex.rst' if os.path.isfile(PATH) and os.access(PATH, os.R_OK): for line in fileinput.input(PATH, inplace=True): found = False for pattern in exclude_patterns: if fnmatch.fnmatch(line, '*' + pattern[4:]): found = True if not found: print(line) # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'oslo.vmware' copyright = u'2014, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # html_static_path = ['static'] # Output file base name for HTML help builder. 
htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # Example configuration for intersphinx: refer to the Python standard library. #intersphinx_mapping = {'http://docs.python.org/': None} vmware-nsx-12.0.1/doc/source/history.rst0000666000175100017510000000003513244523345020233 0ustar zuulzuul00000000000000.. include:: ../../ChangeLog vmware-nsx-12.0.1/doc/source/index.rst0000666000175100017510000000055213244523345017645 0ustar zuulzuul00000000000000Welcome to vmware-nsx's documentation! ======================================= Contents: .. toctree:: :maxdepth: 2 readme installation usage contributing history Code Documentation ================== .. toctree:: :maxdepth: 1 api/autoindex Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` vmware-nsx-12.0.1/doc/source/installation.rst0000666000175100017510000000031013244523345021227 0ustar zuulzuul00000000000000============ Installation ============ At the command line:: $ pip install vmware-nsx Or, if you have virtualenvwrapper installed:: $ mkvirtualenv vmware-nsx $ pip install vmware-nsx vmware-nsx-12.0.1/doc/requirements.txt0000666000175100017510000000065713244523345017776 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
sphinx!=1.6.6,>=1.6.2 # BSD oslosphinx>=4.7.0 # Apache-2.0 reno>=2.5.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD testresources>=2.0.0 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD oslotest>=3.2.0 # Apache-2.0 vmware-nsx-12.0.1/LICENSE0000666000175100017510000002363713244523345014755 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
vmware-nsx-12.0.1/.pylintrc0000666000175100017510000000753513244523345015614 0ustar zuulzuul00000000000000# The format of this file isn't really documented; just use --generate-rcfile [MASTER] # Add to the black list. It should be a base name, not a # path. You may set this option multiple times. # # Note the 'openstack' below is intended to match only # neutron.openstack.common. If we ever have another 'openstack' # dirname, then we'll need to expand the ignore features in pylint :/ ignore=.git,tests,openstack [MESSAGES CONTROL] # NOTE(gus): This is a long list. A number of these are important and # should be re-enabled once the offending code is fixed (or marked # with a local disable) disable= # "F" Fatal errors that prevent further processing import-error, # "I" Informational noise locally-disabled, # "E" Error for important programming issues (likely bugs) access-member-before-definition, no-member, no-method-argument, no-self-argument, not-an-iterable, # "W" Warnings for stylistic problems or minor programming issues abstract-method, abstract-class-instantiated, arguments-differ, attribute-defined-outside-init, bad-builtin, bad-indentation, broad-except, dangerous-default-value, deprecated-lambda, expression-not-assigned, fixme, global-statement, literal-comparison, no-init, non-parent-init-called, not-callable, protected-access, redefined-builtin, redefined-outer-name, signature-differs, star-args, super-init-not-called, super-on-old-class, unpacking-non-sequence, unused-argument, unused-import, unused-variable, unsubscriptable-object, useless-super-delegation, # TODO(dougwig) - disable nonstandard-exception while we have neutron_lib shims nonstandard-exception, # "C" Coding convention violations bad-continuation, consider-iterating-dictionary, consider-using-enumerate, invalid-name, len-as-condition, misplaced-comparison-constant, missing-docstring, singleton-comparison, superfluous-parens, ungrouped-imports, wrong-import-order, wrong-import-position, # "R" 
Refactor recommendations abstract-class-little-used, abstract-class-not-used, consider-merging-isinstance, consider-using-ternary, duplicate-code, interface-not-implemented, no-else-return, no-self-use, redefined-argument-from-local, simplifiable-if-statement, too-few-public-methods, too-many-ancestors, too-many-arguments, too-many-boolean-expressions, too-many-branches, too-many-function-args, too-many-instance-attributes, too-many-lines, too-many-locals, too-many-nested-blocks, too-many-public-methods, too-many-return-statements, too-many-statements, cyclic-import, no-name-in-module, bad-super-call [BASIC] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowecased with underscores method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$ # Module names matching neutron-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [FORMAT] # Maximum number of characters on a single line. max-line-length=79 [VARIABLES] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. # _ is used by our localization additional-builtins=_ [CLASSES] # List of interface methods to ignore, separated by a comma. 
ignore-iface-methods= [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules= # should use openstack.common.jsonutils json [TYPECHECK] # List of module names for which member attributes should not be checked ignored-modules=six.moves,_MovedItems [REPORTS] # Tells whether to display a full report or only the messages reports=no vmware-nsx-12.0.1/vmware_nsx.egg-info/0000775000175100017510000000000013244524600017611 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx.egg-info/entry_points.txt0000664000175100017510000000742613244524575023133 0ustar zuulzuul00000000000000[console_scripts] neutron-check-nsx-config = vmware_nsx.check_nsx_config:main nsx-migration = vmware_nsx.api_replay.cli:main nsxadmin = vmware_nsx.shell.nsxadmin:main [firewall_drivers] vmware_nsxtvd_edge_v1 = vmware_nsx.services.fwaas.nsx_tv.edge_fwaas_driver_v1:EdgeFwaasTVDriverV1 vmware_nsxtvd_edge_v2 = vmware_nsx.services.fwaas.nsx_tv.edge_fwaas_driver_v2:EdgeFwaasTVDriverV2 vmware_nsxv3_edge = vmware_nsx.services.fwaas.nsx_v3.edge_fwaas_driver_v1:EdgeFwaasV3DriverV1 vmware_nsxv3_edge_v1 = vmware_nsx.services.fwaas.nsx_v3.edge_fwaas_driver_v1:EdgeFwaasV3DriverV1 vmware_nsxv3_edge_v2 = vmware_nsx.services.fwaas.nsx_v3.edge_fwaas_driver_v2:EdgeFwaasV3DriverV2 vmware_nsxv_edge = vmware_nsx.services.fwaas.nsx_v.edge_fwaas_driver:EdgeFwaasDriver [networking_sfc.flowclassifier.drivers] vmware-nsxv-sfc = vmware_nsx.services.flowclassifier.nsx_v.driver:NsxvFlowClassifierDriver [neutron.core_plugins] vmware_dvs = vmware_nsx.plugin:NsxDvsPlugin vmware_nsx = vmware_nsx.plugin:NsxPlugin vmware_nsxtvd = vmware_nsx.plugin:NsxTVDPlugin vmware_nsxv = vmware_nsx.plugin:NsxVPlugin vmware_nsxv3 = vmware_nsx.plugin:NsxV3Plugin [neutron.db.alembic_migrations] vmware-nsx = vmware_nsx.db.migration:alembic_migrations [neutron.ipam_drivers] vmware_nsxtvd_ipam = vmware_nsx.services.ipam.nsx_tvd.driver:NsxTvdIpamDriver vmware_nsxv3_ipam = 
vmware_nsx.services.ipam.nsx_v3.driver:Nsxv3IpamDriver vmware_nsxv_ipam = vmware_nsx.services.ipam.nsx_v.driver:NsxvIpamDriver [neutron.qos.notification_drivers] vmware_nsxv3_message_queue = vmware_nsx.services.qos.nsx_v3.message_queue:NsxV3QosNotificationDriver [neutron.service_plugins] vmware_nsxtvd_fwaasv1 = vmware_nsx.services.fwaas.nsx_tv.plugin_v1:FwaasTVPluginV1 vmware_nsxtvd_fwaasv2 = vmware_nsx.services.fwaas.nsx_tv.plugin_v2:FwaasTVPluginV2 vmware_nsxtvd_l2gw = vmware_nsx.services.l2gateway.nsx_tvd.plugin:L2GatewayPlugin vmware_nsxtvd_lbaasv2 = vmware_nsx.services.lbaas.nsx.plugin:LoadBalancerTVPluginV2 vmware_nsxtvd_qos = vmware_nsx.services.qos.nsx_tvd.plugin:QoSPlugin vmware_nsxtvd_vpnaas = vmware_nsx.services.vpnaas.nsx_tvd.plugin:VPNPlugin vmware_nsxv_qos = vmware_nsx.services.qos.nsx_v.plugin:NsxVQosPlugin [openstack.cli.extension] nsxclient = vmware_nsx.osc.plugin [openstack.nsxclient.v2] port_create = vmware_nsx.osc.v2.port:NsxCreatePort port_set = vmware_nsx.osc.v2.port:NsxSetPort project_plugin_create = vmware_nsx.osc.v2.project_plugin_map:CreateProjectPluginMap project_plugin_list = vmware_nsx.osc.v2.project_plugin_map:ListProjectPluginMap project_plugin_show = vmware_nsx.osc.v2.project_plugin_map:ShowProjectPluginMap router_create = vmware_nsx.osc.v2.router:NsxCreateRouter router_set = vmware_nsx.osc.v2.router:NsxSetRouter security_group_create = vmware_nsx.osc.v2.security_group:NsxCreateSecurityGroup security_group_set = vmware_nsx.osc.v2.security_group:NsxSetSecurityGroup subnet_create = vmware_nsx.osc.v2.subnet:NsxCreateSubnet subnet_set = vmware_nsx.osc.v2.subnet:NsxSetSubnet [oslo.config.opts] nsx = vmware_nsx.opts:list_opts [vmware_nsx.extension_drivers] vmware_dvs_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverDVS vmware_nsxv3_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverNSXv3 vmware_nsxv_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverNSXv 
[vmware_nsx.neutron.nsxv.housekeeper.jobs] error_backup_edge = vmware_nsx.plugins.nsx_v.housekeeper.error_backup_edge:ErrorBackupEdgeJob error_dhcp_edge = vmware_nsx.plugins.nsx_v.housekeeper.error_dhcp_edge:ErrorDhcpEdgeJob [vmware_nsx.neutron.nsxv.router_type_drivers] distributed = vmware_nsx.plugins.nsx_v.drivers.distributed_router_driver:RouterDistributedDriver exclusive = vmware_nsx.plugins.nsx_v.drivers.exclusive_router_driver:RouterExclusiveDriver shared = vmware_nsx.plugins.nsx_v.drivers.shared_router_driver:RouterSharedDriver vmware-nsx-12.0.1/vmware_nsx.egg-info/not-zip-safe0000664000175100017510000000000113244524306022042 0ustar zuulzuul00000000000000 vmware-nsx-12.0.1/vmware_nsx.egg-info/top_level.txt0000664000175100017510000000001313244524575022350 0ustar zuulzuul00000000000000vmware_nsx vmware-nsx-12.0.1/vmware_nsx.egg-info/PKG-INFO0000664000175100017510000000340613244524575020724 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: vmware-nsx Version: 12.0.1 Summary: VMware NSX library for OpenStack projects Home-page: https://launchpad.net/vmware-nsx Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: =================== VMware-NSX package =================== You have come across the VMware-NSX family of Neutron plugins External Resources: ------------------- The homepage for the VMware-NSX project is on Launchpad_. .. _Launchpad: https://launchpad.net/vmware-nsx Use this site for asking for help, and filing bugs. Code is available both git.openstack.org_ and github_. .. _git.openstack.org: https://git.openstack.org/cgit/openstack/vmware-nsx/tree/ .. _github: https://github.com/openstack/vmware-nsx For help on usage and hacking of VMware-NSX, please send a message to the openstack-dev_ mailing list. .. _openstack-dev: mailto:openstack-dev@lists.openstack.org For information on how to contribute to VMware-NSX, please see the contents of the CONTRIBUTING.rst file. 
Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 vmware-nsx-12.0.1/vmware_nsx.egg-info/requires.txt0000664000175100017510000000104213244524575022221 0ustar zuulzuul00000000000000pbr!=2.1.0,>=2.0.0 enum34>=1.0.4 eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 httplib2>=0.9.1 netaddr>=0.7.18 tenacity>=3.2.1 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 six>=1.10.0 stevedore>=1.20.0 neutron-lib>=1.13.0 osc-lib>=1.8.0 python-openstackclient>=3.12.0 oslo.concurrency>=3.25.0 oslo.context>=2.19.2 oslo.config>=5.1.0 oslo.db>=4.27.0 oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.serialization!=2.19.1,>=2.18.0 oslo.service!=1.28.1,>=1.24.0 oslo.utils>=3.33.0 oslo.vmware>=2.17.0 PrettyTable<0.8,>=0.7.1 tooz>=1.58.0 decorator>=3.4.0 vmware-nsx-12.0.1/vmware_nsx.egg-info/dependency_links.txt0000664000175100017510000000000113244524575023672 0ustar zuulzuul00000000000000 vmware-nsx-12.0.1/vmware_nsx.egg-info/pbr.json0000664000175100017510000000005613244524575021303 0ustar zuulzuul00000000000000{"git_version": "e34784c", "is_release": true}vmware-nsx-12.0.1/vmware_nsx.egg-info/SOURCES.txt0000664000175100017510000007111113244524600021476 0ustar zuulzuul00000000000000.coveragerc .mailmap .pylintrc .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MANIFEST.in README.rst TESTING.rst babel.cfg requirements.txt run_tests.sh setup.cfg setup.py test-requirements.txt tox.ini api-ref/rest.md devstack/README.rst devstack/localrc_nsx_v3 devstack/override-defaults devstack/plugin.sh devstack/settings devstack/lib/nsx_common 
devstack/lib/vmware_dvs devstack/lib/vmware_nsx devstack/lib/vmware_nsx_tvd devstack/lib/vmware_nsx_v devstack/lib/vmware_nsx_v3 devstack/nsx_v/devstackgaterc devstack/nsx_v/tvd_devstackgaterc devstack/nsx_v3/controller_local.conf.sample devstack/nsx_v3/devstackgaterc devstack/nsx_v3/kvm_compute_local.conf.sample devstack/tools/nsxv3_cleanup.py devstack/tools/nsxv_cleanup.py devstack/tools/nsxv_edge_resources.py devstack/tools/nsxv_fw_autodraft_setting.py doc/requirements.txt doc/source/admin_util.rst doc/source/conf.py doc/source/contributing.rst doc/source/devstack.rst doc/source/history.rst doc/source/housekeeper.rst doc/source/index.rst doc/source/installation.rst doc/source/readme.rst doc/source/usage.rst etc/README.txt etc/policy.json etc/oslo-config-generator/nsx.ini etc/policy.d/dynamic-routing.json etc/policy.d/flow-classifier.json etc/policy.d/network-gateways.json etc/policy.d/neutron-fwaas.json etc/policy.d/routers.json etc/policy.d/security-groups.json releasenotes/notes/.placeholder releasenotes/notes/bind-floating-ips-per-az-142f0de7ebfae1c8.yaml releasenotes/notes/block-all-no-security-groups-47af550349dbc85a.yaml releasenotes/notes/dns-search-domain-configuration-a134af0ef028282c.yaml releasenotes/notes/dvs_dns_integration-831224f15acbc728.yaml releasenotes/notes/ens_support-49dbc626ba1b16be.yaml releasenotes/notes/fwaas_v2-9445ea0aaea91c60.yaml releasenotes/notes/nsx-dns-integration-extension-8260456051d61743.yaml releasenotes/notes/nsx-extension-drivers-b1aedabe5296d4d0.yaml releasenotes/notes/nsxv-availability-zones-85db159a647762b3.yaml releasenotes/notes/nsxv-bgp-support-44f857d382943e08.yaml releasenotes/notes/nsxv-edge-random-placement-9534371967edec8f.yaml releasenotes/notes/nsxv-exclusive-dhcp-7e5cde1cd88f8c5b.yaml releasenotes/notes/nsxv-fwaas-driver-4c457dee3fc3bae2.yaml releasenotes/notes/nsxv-ipam-support-6eb1ac4e0e025ddd.yaml releasenotes/notes/nsxv-lbaas-l7-704f748300d1a399.yaml releasenotes/notes/nsxv-policy-3f552191f94873cd.yaml 
releasenotes/notes/nsxv-router-flavors-8e4cea7f6e12d44d.yaml releasenotes/notes/nsxv-service-insertion-32ab34a0e0f6ab4f.yaml releasenotes/notes/nsxv-subnets-dhcp-mtu-c7028748b516422e.yaml releasenotes/notes/nsxv-vlan-selection-ec73aac44b3648a1.yaml releasenotes/notes/nsxv3-add-trunk-driver-925ad1205972cbdf.yaml releasenotes/notes/nsxv3-availability-zones-8decf892df62.yaml releasenotes/notes/nsxv3-dhcp-relay-32cf1ae281e1.yaml releasenotes/notes/nsxv3-init-from-tags-bcd4f3245a78e9a6.yaml releasenotes/notes/nsxv3-ipam-support-137174152c65459d.yaml releasenotes/notes/nsxv3-lbaasv2-driver-57f37d6614eb1510.yaml releasenotes/notes/nsxv3-multi-managers-b645c4202a8476e9.yaml releasenotes/notes/nsxv3-native-dhcp-config-2b6bdd372a2d643f.yaml releasenotes/notes/nsxv3-native-dhcp-metadata-27af1de98302162f.yaml releasenotes/notes/nsxv3-switching-profiles-250aa43f5070dc37.yaml releasenotes/notes/nsxv3-taas-driver-1a316cf3915fcb3d.yaml releasenotes/notes/nsxv3-trnasparent-vlan-fe06e1d3aa2fbcd9.yaml releasenotes/notes/nsxv3-update-provider-types-aa1c20e988878ffe.yaml releasenotes/notes/nsxv3-vpnaas-0b02762ff4b83904.yaml releasenotes/notes/provider-security-group-2cfc1231dcaf21ac.yaml releasenotes/notes/qos-support-d52b5e3abfc6c8d4.yaml releasenotes/notes/rename_uuid_config_params-b36c379f64838334.yaml releasenotes/notes/rename_uuid_to_name-e64699df75176d4d.yaml releasenotes/notes/universal-switch-41487c280ad3c8ad.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/__init__.py tools/clean.sh tools/coding-checks.sh tools/generate_config_file_samples.sh tools/install_venv.py tools/install_venv_common.py tools/misc-sanity-checks.sh tools/ostestr_compat_shim.sh tools/test-setup.sh tools/tox_install.sh tools/tox_install_project.sh 
tools/with_venv.sh vmware_nsx/__init__.py vmware_nsx/_i18n.py vmware_nsx/check_nsx_config.py vmware_nsx/nsx_cluster.py vmware_nsx/opts.py vmware_nsx/plugin.py vmware_nsx/version.py vmware_nsx.egg-info/PKG-INFO vmware_nsx.egg-info/SOURCES.txt vmware_nsx.egg-info/dependency_links.txt vmware_nsx.egg-info/entry_points.txt vmware_nsx.egg-info/not-zip-safe vmware_nsx.egg-info/pbr.json vmware_nsx.egg-info/requires.txt vmware_nsx.egg-info/top_level.txt vmware_nsx/api_client/__init__.py vmware_nsx/api_client/base.py vmware_nsx/api_client/client.py vmware_nsx/api_client/eventlet_client.py vmware_nsx/api_client/eventlet_request.py vmware_nsx/api_client/exception.py vmware_nsx/api_client/request.py vmware_nsx/api_client/version.py vmware_nsx/api_replay/__init__.py vmware_nsx/api_replay/cli.py vmware_nsx/api_replay/client.py vmware_nsx/api_replay/utils.py vmware_nsx/common/__init__.py vmware_nsx/common/availability_zones.py vmware_nsx/common/config.py vmware_nsx/common/driver_api.py vmware_nsx/common/exceptions.py vmware_nsx/common/l3_rpc_agent_api.py vmware_nsx/common/locking.py vmware_nsx/common/managers.py vmware_nsx/common/nsx_constants.py vmware_nsx/common/nsx_utils.py vmware_nsx/common/nsxv_constants.py vmware_nsx/common/securitygroups.py vmware_nsx/common/sync.py vmware_nsx/common/utils.py vmware_nsx/db/__init__.py vmware_nsx/db/db.py vmware_nsx/db/distributedrouter.py vmware_nsx/db/extended_security_group.py vmware_nsx/db/extended_security_group_rule.py vmware_nsx/db/lsn_db.py vmware_nsx/db/maclearning.py vmware_nsx/db/networkgw_db.py vmware_nsx/db/nsx_models.py vmware_nsx/db/nsx_portbindings_db.py vmware_nsx/db/nsxrouter.py vmware_nsx/db/nsxv_db.py vmware_nsx/db/nsxv_models.py vmware_nsx/db/qos_db.py vmware_nsx/db/routertype.py vmware_nsx/db/vcns_models.py vmware_nsx/db/vnic_index_db.py vmware_nsx/db/migration/__init__.py vmware_nsx/db/migration/alembic_migrations/__init__.py vmware_nsx/db/migration/alembic_migrations/env.py 
vmware_nsx/db/migration/alembic_migrations/script.py.mako vmware_nsx/db/migration/alembic_migrations/versions/CONTRACT_HEAD vmware_nsx/db/migration/alembic_migrations/versions/EXPAND_HEAD vmware_nsx/db/migration/alembic_migrations/versions/kilo_release.py vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/393bf843b96_initial_liberty_no_op_contract_script.py vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/3c88bdea3054_nsxv_vdr_dhcp_binding.py vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/279b70ac3ae8_nsxv3_add_l2gwconnection_table.py vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/28430956782d_nsxv3_security_groups.py vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/53a3254aa95e_initial_liberty_no_op_expand_script.py vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/20483029f1ff_update_tz_network_bindings.py vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/2af850eb3970_update_nsxv_tz_binding_type.py vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/312211a5725f_nsxv_lbv2.py vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/4c45bcadccf9_extend_secgroup_rule.py vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/69fb78b33d41_nsxv_add_search_domain_to_subnets.py vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/081af0e396d7_nsx_extended_rule_table_rename.py vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/5ed1ffbc0d2a_nsx_security_group_logging.py vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/d49ac91b560e_nsxv_lbaasv2_shared_pools.py vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/dbe29d208ac6_nsxv_add_dhcp_mtu_to_subnets.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/1b4eaffe4f31_nsx_provider_security_group.py 
vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/2c87aedb206f_nsxv_security_group_logging.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/3e4dccfe6fb4_nsx_security_group_logging.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/5e564e781d77_add_nsx_binding_type.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/633514d94b93_add_support_for_taas.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/6e6da8296c0e_add_nsxv_ipam.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/7b5ec3caa9a4_nsxv_fix_az_default.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/7e46906f8997_lbaas_foreignkeys.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/86a55205337c_nsxv_availability_zone_router_mapping.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/967462f585e1_add_dvs_id_to_switch_mappings.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/aede17d51d0f_timestamps.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/b7f41687cbad_nsxv3_qos_policy_mapping.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c288bb6a7252_nsxv_add_resource_pool_to_router_mapping.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c644ec62c585_nsxv3_add_nsx_dhcp_service_tables.py vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/14a89ddf96e2_add_az_internal_network.py vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/5c8f451290b7_nsx_ipam_table_rename.py vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/01a33f93f5fd_nsxv_lbv2_l7pol.py vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/dd9fe5a3a526_nsx_add_certificate_table.py vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/e816d4fe9d4f_nsx_add_policy_security_group.py 
vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/84ceffa27115_nsxv3_qos_policy_no_foreign_key.py vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/8c0a81a07691_fix_ipam_table.py vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/53eb497903a4_drop_vdr_dhcp_bindings.py vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/7c4704ad37df_nsxv_lbv2_l7pol_fix.py vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/8699700cd95c_nsxv_bgp_speaker_mapping.py vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/e4c503f4133f_port_vnic_type_support.py vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/ea7a72ab9643_nsxv3_lbaas_mapping.py vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/717f7f63a219_nsxv3_lbaas_l7policy.py vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/a1be06050b41_update_nsx_binding_types.py vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/0dbeda408e41_nsxv3_vpn_mapping.py vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/9799427fc0e1_nsx_tv_map.py vmware_nsx/db/migration/models/__init__.py vmware_nsx/db/migration/models/head.py vmware_nsx/dhcp_meta/__init__.py vmware_nsx/dhcp_meta/combined.py vmware_nsx/dhcp_meta/constants.py vmware_nsx/dhcp_meta/lsnmanager.py vmware_nsx/dhcp_meta/migration.py vmware_nsx/dhcp_meta/modes.py vmware_nsx/dhcp_meta/nsx.py vmware_nsx/dhcp_meta/rpc.py vmware_nsx/dvs/__init__.py vmware_nsx/dvs/dvs.py vmware_nsx/dvs/dvs_utils.py vmware_nsx/extension_drivers/__init__.py vmware_nsx/extension_drivers/dns_integration.py vmware_nsx/extensions/__init__.py vmware_nsx/extensions/advancedserviceproviders.py vmware_nsx/extensions/api_replay.py vmware_nsx/extensions/dhcp_mtu.py vmware_nsx/extensions/dns_search_domain.py vmware_nsx/extensions/edge_service_gateway_bgp_peer.py vmware_nsx/extensions/housekeeper.py vmware_nsx/extensions/lsn.py vmware_nsx/extensions/maclearning.py 
vmware_nsx/extensions/networkgw.py vmware_nsx/extensions/nsxpolicy.py vmware_nsx/extensions/projectpluginmap.py vmware_nsx/extensions/providersecuritygroup.py vmware_nsx/extensions/qos_queue.py vmware_nsx/extensions/routersize.py vmware_nsx/extensions/routertype.py vmware_nsx/extensions/secgroup_rule_local_ip_prefix.py vmware_nsx/extensions/securitygrouplogging.py vmware_nsx/extensions/securitygrouppolicy.py vmware_nsx/extensions/vnicindex.py vmware_nsx/nsxlib/__init__.py vmware_nsx/nsxlib/mh/__init__.py vmware_nsx/nsxlib/mh/l2gateway.py vmware_nsx/nsxlib/mh/lsn.py vmware_nsx/nsxlib/mh/queue.py vmware_nsx/nsxlib/mh/router.py vmware_nsx/nsxlib/mh/secgroup.py vmware_nsx/nsxlib/mh/switch.py vmware_nsx/nsxlib/mh/versioning.py vmware_nsx/osc/__init__.py vmware_nsx/osc/plugin.py vmware_nsx/osc/v2/__init__.py vmware_nsx/osc/v2/port.py vmware_nsx/osc/v2/project_plugin_map.py vmware_nsx/osc/v2/router.py vmware_nsx/osc/v2/security_group.py vmware_nsx/osc/v2/subnet.py vmware_nsx/osc/v2/utils.py vmware_nsx/plugins/__init__.py vmware_nsx/plugins/common/__init__.py vmware_nsx/plugins/common/plugin.py vmware_nsx/plugins/common/housekeeper/__init__.py vmware_nsx/plugins/common/housekeeper/base_job.py vmware_nsx/plugins/common/housekeeper/housekeeper.py vmware_nsx/plugins/dvs/__init__.py vmware_nsx/plugins/dvs/dhcp.py vmware_nsx/plugins/dvs/plugin.py vmware_nsx/plugins/nsx/__init__.py vmware_nsx/plugins/nsx/plugin.py vmware_nsx/plugins/nsx/utils.py vmware_nsx/plugins/nsx_mh/__init__.py vmware_nsx/plugins/nsx_mh/plugin.py vmware_nsx/plugins/nsx_v/__init__.py vmware_nsx/plugins/nsx_v/availability_zones.py vmware_nsx/plugins/nsx_v/managers.py vmware_nsx/plugins/nsx_v/md_proxy.py vmware_nsx/plugins/nsx_v/plugin.py vmware_nsx/plugins/nsx_v/drivers/__init__.py vmware_nsx/plugins/nsx_v/drivers/abstract_router_driver.py vmware_nsx/plugins/nsx_v/drivers/distributed_router_driver.py vmware_nsx/plugins/nsx_v/drivers/exclusive_router_driver.py 
vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py vmware_nsx/plugins/nsx_v/housekeeper/__init__.py vmware_nsx/plugins/nsx_v/housekeeper/error_backup_edge.py vmware_nsx/plugins/nsx_v/housekeeper/error_dhcp_edge.py vmware_nsx/plugins/nsx_v/vshield/__init__.py vmware_nsx/plugins/nsx_v/vshield/edge_appliance_driver.py vmware_nsx/plugins/nsx_v/vshield/edge_dynamic_routing_driver.py vmware_nsx/plugins/nsx_v/vshield/edge_firewall_driver.py vmware_nsx/plugins/nsx_v/vshield/edge_ipsecvpn_driver.py vmware_nsx/plugins/nsx_v/vshield/edge_utils.py vmware_nsx/plugins/nsx_v/vshield/nsxv_edge_cfg_obj.py vmware_nsx/plugins/nsx_v/vshield/nsxv_loadbalancer.py vmware_nsx/plugins/nsx_v/vshield/securitygroup_utils.py vmware_nsx/plugins/nsx_v/vshield/vcns.py vmware_nsx/plugins/nsx_v/vshield/vcns_driver.py vmware_nsx/plugins/nsx_v/vshield/common/VcnsApiClient.py vmware_nsx/plugins/nsx_v/vshield/common/__init__.py vmware_nsx/plugins/nsx_v/vshield/common/constants.py vmware_nsx/plugins/nsx_v/vshield/common/exceptions.py vmware_nsx/plugins/nsx_v/vshield/tasks/__init__.py vmware_nsx/plugins/nsx_v/vshield/tasks/constants.py vmware_nsx/plugins/nsx_v/vshield/tasks/tasks.py vmware_nsx/plugins/nsx_v3/__init__.py vmware_nsx/plugins/nsx_v3/availability_zones.py vmware_nsx/plugins/nsx_v3/cert_utils.py vmware_nsx/plugins/nsx_v3/plugin.py vmware_nsx/plugins/nsx_v3/utils.py vmware_nsx/plugins/nsx_v3/api_replay/__init__.py vmware_nsx/services/__init__.py vmware_nsx/services/dynamic_routing/__init__.py vmware_nsx/services/dynamic_routing/bgp_plugin.py vmware_nsx/services/dynamic_routing/nsx_v/__init__.py vmware_nsx/services/dynamic_routing/nsx_v/driver.py vmware_nsx/services/flowclassifier/__init__.py vmware_nsx/services/flowclassifier/nsx_v/__init__.py vmware_nsx/services/flowclassifier/nsx_v/driver.py vmware_nsx/services/flowclassifier/nsx_v/utils.py vmware_nsx/services/fwaas/__init__.py vmware_nsx/services/fwaas/common/__init__.py vmware_nsx/services/fwaas/common/fwaas_callbacks_v1.py 
vmware_nsx/services/fwaas/common/fwaas_callbacks_v2.py vmware_nsx/services/fwaas/common/utils.py vmware_nsx/services/fwaas/nsx_tv/__init__.py vmware_nsx/services/fwaas/nsx_tv/edge_fwaas_driver_v1.py vmware_nsx/services/fwaas/nsx_tv/edge_fwaas_driver_v2.py vmware_nsx/services/fwaas/nsx_tv/plugin_v1.py vmware_nsx/services/fwaas/nsx_tv/plugin_v2.py vmware_nsx/services/fwaas/nsx_v/__init__.py vmware_nsx/services/fwaas/nsx_v/edge_fwaas_driver.py vmware_nsx/services/fwaas/nsx_v/fwaas_callbacks.py vmware_nsx/services/fwaas/nsx_v3/__init__.py vmware_nsx/services/fwaas/nsx_v3/edge_fwaas_driver_base.py vmware_nsx/services/fwaas/nsx_v3/edge_fwaas_driver_v1.py vmware_nsx/services/fwaas/nsx_v3/edge_fwaas_driver_v2.py vmware_nsx/services/fwaas/nsx_v3/fwaas_callbacks_v1.py vmware_nsx/services/fwaas/nsx_v3/fwaas_callbacks_v2.py vmware_nsx/services/ipam/__init__.py vmware_nsx/services/ipam/common/__init__.py vmware_nsx/services/ipam/common/driver.py vmware_nsx/services/ipam/nsx_tvd/__init__.py vmware_nsx/services/ipam/nsx_tvd/driver.py vmware_nsx/services/ipam/nsx_v/__init__.py vmware_nsx/services/ipam/nsx_v/driver.py vmware_nsx/services/ipam/nsx_v3/__init__.py vmware_nsx/services/ipam/nsx_v3/driver.py vmware_nsx/services/l2gateway/__init__.py vmware_nsx/services/l2gateway/nsx_tvd/__init__.py vmware_nsx/services/l2gateway/nsx_tvd/driver.py vmware_nsx/services/l2gateway/nsx_tvd/plugin.py vmware_nsx/services/l2gateway/nsx_v/__init__.py vmware_nsx/services/l2gateway/nsx_v/driver.py vmware_nsx/services/l2gateway/nsx_v3/__init__.py vmware_nsx/services/l2gateway/nsx_v3/driver.py vmware_nsx/services/lbaas/__init__.py vmware_nsx/services/lbaas/base_mgr.py vmware_nsx/services/lbaas/lb_const.py vmware_nsx/services/lbaas/nsx/__init__.py vmware_nsx/services/lbaas/nsx/lb_driver_v2.py vmware_nsx/services/lbaas/nsx/plugin.py vmware_nsx/services/lbaas/nsx_v/__init__.py vmware_nsx/services/lbaas/nsx_v/lbaas_common.py vmware_nsx/services/lbaas/nsx_v/v2/__init__.py 
vmware_nsx/services/lbaas/nsx_v/v2/edge_loadbalancer_driver_v2.py vmware_nsx/services/lbaas/nsx_v/v2/healthmon_mgr.py vmware_nsx/services/lbaas/nsx_v/v2/l7policy_mgr.py vmware_nsx/services/lbaas/nsx_v/v2/l7rule_mgr.py vmware_nsx/services/lbaas/nsx_v/v2/listener_mgr.py vmware_nsx/services/lbaas/nsx_v/v2/loadbalancer_mgr.py vmware_nsx/services/lbaas/nsx_v/v2/member_mgr.py vmware_nsx/services/lbaas/nsx_v/v2/pool_mgr.py vmware_nsx/services/lbaas/nsx_v3/__init__.py vmware_nsx/services/lbaas/nsx_v3/healthmonitor_mgr.py vmware_nsx/services/lbaas/nsx_v3/l7policy_mgr.py vmware_nsx/services/lbaas/nsx_v3/l7rule_mgr.py vmware_nsx/services/lbaas/nsx_v3/lb_driver_v2.py vmware_nsx/services/lbaas/nsx_v3/lb_utils.py vmware_nsx/services/lbaas/nsx_v3/listener_mgr.py vmware_nsx/services/lbaas/nsx_v3/loadbalancer_mgr.py vmware_nsx/services/lbaas/nsx_v3/member_mgr.py vmware_nsx/services/lbaas/nsx_v3/pool_mgr.py vmware_nsx/services/qos/__init__.py vmware_nsx/services/qos/common/__init__.py vmware_nsx/services/qos/common/utils.py vmware_nsx/services/qos/nsx_tvd/__init__.py vmware_nsx/services/qos/nsx_tvd/plugin.py vmware_nsx/services/qos/nsx_v/__init__.py vmware_nsx/services/qos/nsx_v/driver.py vmware_nsx/services/qos/nsx_v/plugin.py vmware_nsx/services/qos/nsx_v/utils.py vmware_nsx/services/qos/nsx_v3/__init__.py vmware_nsx/services/qos/nsx_v3/driver.py vmware_nsx/services/qos/nsx_v3/message_queue.py vmware_nsx/services/qos/nsx_v3/utils.py vmware_nsx/services/trunk/__init__.py vmware_nsx/services/trunk/nsx_v3/__init__.py vmware_nsx/services/trunk/nsx_v3/driver.py vmware_nsx/services/vpnaas/__init__.py vmware_nsx/services/vpnaas/nsx_tvd/__init__.py vmware_nsx/services/vpnaas/nsx_tvd/ipsec_driver.py vmware_nsx/services/vpnaas/nsx_tvd/ipsec_validator.py vmware_nsx/services/vpnaas/nsx_tvd/plugin.py vmware_nsx/services/vpnaas/nsxv/__init__.py vmware_nsx/services/vpnaas/nsxv/ipsec_driver.py vmware_nsx/services/vpnaas/nsxv/ipsec_validator.py vmware_nsx/services/vpnaas/nsxv3/__init__.py 
vmware_nsx/services/vpnaas/nsxv3/ipsec_driver.py vmware_nsx/services/vpnaas/nsxv3/ipsec_utils.py vmware_nsx/services/vpnaas/nsxv3/ipsec_validator.py vmware_nsx/shell/__init__.py vmware_nsx/shell/commands.py vmware_nsx/shell/hk_trigger.sh vmware_nsx/shell/nsx_instance_if_migrate.py vmware_nsx/shell/nsxadmin.py vmware_nsx/shell/resources.py vmware_nsx/shell/admin/README.rst vmware_nsx/shell/admin/__init__.py vmware_nsx/shell/admin/version.py vmware_nsx/shell/admin/plugins/__init__.py vmware_nsx/shell/admin/plugins/common/__init__.py vmware_nsx/shell/admin/plugins/common/constants.py vmware_nsx/shell/admin/plugins/common/formatters.py vmware_nsx/shell/admin/plugins/common/utils.py vmware_nsx/shell/admin/plugins/nsxtvd/__init__.py vmware_nsx/shell/admin/plugins/nsxtvd/resources/__init__.py vmware_nsx/shell/admin/plugins/nsxtvd/resources/migrate.py vmware_nsx/shell/admin/plugins/nsxv/__init__.py vmware_nsx/shell/admin/plugins/nsxv/resources/__init__.py vmware_nsx/shell/admin/plugins/nsxv/resources/backup_edges.py vmware_nsx/shell/admin/plugins/nsxv/resources/config.py vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py vmware_nsx/shell/admin/plugins/nsxv/resources/edges.py vmware_nsx/shell/admin/plugins/nsxv/resources/gw_edges.py vmware_nsx/shell/admin/plugins/nsxv/resources/metadata.py vmware_nsx/shell/admin/plugins/nsxv/resources/networks.py vmware_nsx/shell/admin/plugins/nsxv/resources/routers.py vmware_nsx/shell/admin/plugins/nsxv/resources/securitygroups.py vmware_nsx/shell/admin/plugins/nsxv/resources/spoofguard_policy.py vmware_nsx/shell/admin/plugins/nsxv/resources/utils.py vmware_nsx/shell/admin/plugins/nsxv3/__init__.py vmware_nsx/shell/admin/plugins/nsxv3/resources/__init__.py vmware_nsx/shell/admin/plugins/nsxv3/resources/certificates.py vmware_nsx/shell/admin/plugins/nsxv3/resources/config.py vmware_nsx/shell/admin/plugins/nsxv3/resources/dhcp_binding.py vmware_nsx/shell/admin/plugins/nsxv3/resources/dhcp_servers.py 
vmware_nsx/shell/admin/plugins/nsxv3/resources/http_service.py vmware_nsx/shell/admin/plugins/nsxv3/resources/loadbalancer.py vmware_nsx/shell/admin/plugins/nsxv3/resources/metadata_proxy.py vmware_nsx/shell/admin/plugins/nsxv3/resources/networks.py vmware_nsx/shell/admin/plugins/nsxv3/resources/ports.py vmware_nsx/shell/admin/plugins/nsxv3/resources/routers.py vmware_nsx/shell/admin/plugins/nsxv3/resources/securitygroups.py vmware_nsx/shell/admin/plugins/nsxv3/resources/utils.py vmware_nsx/tests/__init__.py vmware_nsx/tests/functional/__init__.py vmware_nsx/tests/functional/requirements.txt vmware_nsx/tests/unit/__init__.py vmware_nsx/tests/unit/test_utils.py vmware_nsx/tests/unit/db/__init__.py vmware_nsx/tests/unit/db/test_migrations.py vmware_nsx/tests/unit/dvs/__init__.py vmware_nsx/tests/unit/dvs/test_plugin.py vmware_nsx/tests/unit/dvs/test_utils.py vmware_nsx/tests/unit/etc/fake_get_gwservice.json vmware_nsx/tests/unit/etc/fake_get_lqueue.json vmware_nsx/tests/unit/etc/fake_get_lrouter.json vmware_nsx/tests/unit/etc/fake_get_lrouter_lport.json vmware_nsx/tests/unit/etc/fake_get_lrouter_lport_att.json vmware_nsx/tests/unit/etc/fake_get_lrouter_nat.json vmware_nsx/tests/unit/etc/fake_get_lswitch.json vmware_nsx/tests/unit/etc/fake_get_lswitch_lport.json vmware_nsx/tests/unit/etc/fake_get_lswitch_lport_att.json vmware_nsx/tests/unit/etc/fake_get_lswitch_lport_status.json vmware_nsx/tests/unit/etc/fake_get_security_profile.json vmware_nsx/tests/unit/etc/fake_post_gwservice.json vmware_nsx/tests/unit/etc/fake_post_lqueue.json vmware_nsx/tests/unit/etc/fake_post_lrouter.json vmware_nsx/tests/unit/etc/fake_post_lrouter_lport.json vmware_nsx/tests/unit/etc/fake_post_lrouter_nat.json vmware_nsx/tests/unit/etc/fake_post_lswitch.json vmware_nsx/tests/unit/etc/fake_post_lswitch_lport.json vmware_nsx/tests/unit/etc/fake_post_security_profile.json vmware_nsx/tests/unit/etc/fake_put_lrouter_lport_att.json vmware_nsx/tests/unit/etc/fake_put_lswitch_lport_att.json 
vmware_nsx/tests/unit/etc/neutron.conf.test vmware_nsx/tests/unit/etc/nsx.ini.agentless.test vmware_nsx/tests/unit/etc/nsx.ini.basic.test vmware_nsx/tests/unit/etc/nsx.ini.combined.test vmware_nsx/tests/unit/etc/nsx.ini.full.test vmware_nsx/tests/unit/etc/nsx.ini.test vmware_nsx/tests/unit/etc/nvp.ini.full.test vmware_nsx/tests/unit/etc/vcns.ini.test vmware_nsx/tests/unit/extension_drivers/__init__.py vmware_nsx/tests/unit/extension_drivers/test_dns_integration.py vmware_nsx/tests/unit/extensions/__init__.py vmware_nsx/tests/unit/extensions/test_addresspairs.py vmware_nsx/tests/unit/extensions/test_dhcp_mtu.py vmware_nsx/tests/unit/extensions/test_dns_search_domain.py vmware_nsx/tests/unit/extensions/test_maclearning.py vmware_nsx/tests/unit/extensions/test_metadata.py vmware_nsx/tests/unit/extensions/test_networkgw.py vmware_nsx/tests/unit/extensions/test_portsecurity.py vmware_nsx/tests/unit/extensions/test_provider_security_groups.py vmware_nsx/tests/unit/extensions/test_providernet.py vmware_nsx/tests/unit/extensions/test_qosqueues.py vmware_nsx/tests/unit/extensions/test_secgroup_rule_local_ip_prefix.py vmware_nsx/tests/unit/extensions/test_security_group_policy.py vmware_nsx/tests/unit/extensions/test_securitygroup.py vmware_nsx/tests/unit/extensions/test_vnic_index.py vmware_nsx/tests/unit/nsx_mh/__init__.py vmware_nsx/tests/unit/nsx_mh/test_dhcpmeta.py vmware_nsx/tests/unit/nsx_mh/test_opts.py vmware_nsx/tests/unit/nsx_mh/test_plugin.py vmware_nsx/tests/unit/nsx_mh/test_sync.py vmware_nsx/tests/unit/nsx_mh/test_utils.py vmware_nsx/tests/unit/nsx_mh/apiclient/__init__.py vmware_nsx/tests/unit/nsx_mh/apiclient/fake.py vmware_nsx/tests/unit/nsx_mh/apiclient/test_api_common.py vmware_nsx/tests/unit/nsx_mh/apiclient/test_api_eventlet_request.py vmware_nsx/tests/unit/nsx_mh/db/__init__.py vmware_nsx/tests/unit/nsx_mh/db/test_lsn_db.py vmware_nsx/tests/unit/nsx_mh/db/test_nsx_db.py vmware_nsx/tests/unit/nsx_tvd/__init__.py 
vmware_nsx/tests/unit/nsx_tvd/test_plugin.py vmware_nsx/tests/unit/nsx_v/__init__.py vmware_nsx/tests/unit/nsx_v/test_availability_zones.py vmware_nsx/tests/unit/nsx_v/test_edge_loadbalancer_driver_v2.py vmware_nsx/tests/unit/nsx_v/test_fwaas_driver.py vmware_nsx/tests/unit/nsx_v/test_lbaas_common.py vmware_nsx/tests/unit/nsx_v/test_md_proxy.py vmware_nsx/tests/unit/nsx_v/test_misc.py vmware_nsx/tests/unit/nsx_v/test_nsxv_loadbalancer.py vmware_nsx/tests/unit/nsx_v/test_plugin.py vmware_nsx/tests/unit/nsx_v/housekeeper/__init__.py vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_backup_edge.py vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_dhcp_edge.py vmware_nsx/tests/unit/nsx_v/vshield/__init__.py vmware_nsx/tests/unit/nsx_v/vshield/fake_vcns.py vmware_nsx/tests/unit/nsx_v/vshield/test_edge_utils.py vmware_nsx/tests/unit/nsx_v/vshield/test_vcns_driver.py vmware_nsx/tests/unit/nsx_v3/__init__.py vmware_nsx/tests/unit/nsx_v3/test_api_replay.py vmware_nsx/tests/unit/nsx_v3/test_availability_zones.py vmware_nsx/tests/unit/nsx_v3/test_client_cert.py vmware_nsx/tests/unit/nsx_v3/test_constants.py vmware_nsx/tests/unit/nsx_v3/test_dhcp_metadata.py vmware_nsx/tests/unit/nsx_v3/test_fwaas_v1_driver.py vmware_nsx/tests/unit/nsx_v3/test_fwaas_v2_driver.py vmware_nsx/tests/unit/nsx_v3/test_plugin.py vmware_nsx/tests/unit/nsxlib/__init__.py vmware_nsx/tests/unit/nsxlib/mh/__init__.py vmware_nsx/tests/unit/nsxlib/mh/base.py vmware_nsx/tests/unit/nsxlib/mh/test_l2gateway.py vmware_nsx/tests/unit/nsxlib/mh/test_lsn.py vmware_nsx/tests/unit/nsxlib/mh/test_queue.py vmware_nsx/tests/unit/nsxlib/mh/test_router.py vmware_nsx/tests/unit/nsxlib/mh/test_secgroup.py vmware_nsx/tests/unit/nsxlib/mh/test_switch.py vmware_nsx/tests/unit/nsxlib/mh/test_versioning.py vmware_nsx/tests/unit/osc/__init__.py vmware_nsx/tests/unit/osc/v2/__init__.py vmware_nsx/tests/unit/osc/v2/test_port.py vmware_nsx/tests/unit/osc/v2/test_router.py vmware_nsx/tests/unit/osc/v2/test_security_group.py 
vmware_nsx/tests/unit/osc/v2/test_subnet.py vmware_nsx/tests/unit/services/__init__.py vmware_nsx/tests/unit/services/dynamic_routing/__init__.py vmware_nsx/tests/unit/services/dynamic_routing/test_nsxv_bgp_driver.py vmware_nsx/tests/unit/services/flowclassifier/__init__.py vmware_nsx/tests/unit/services/flowclassifier/test_nsxv_driver.py vmware_nsx/tests/unit/services/ipam/__init__.py vmware_nsx/tests/unit/services/ipam/test_nsxv3_driver.py vmware_nsx/tests/unit/services/ipam/test_nsxv_driver.py vmware_nsx/tests/unit/services/l2gateway/__init__.py vmware_nsx/tests/unit/services/l2gateway/test_nsxv3_driver.py vmware_nsx/tests/unit/services/l2gateway/test_nsxv_driver.py vmware_nsx/tests/unit/services/lbaas/__init__.py vmware_nsx/tests/unit/services/lbaas/test_nsxv3_driver.py vmware_nsx/tests/unit/services/qos/__init__.py vmware_nsx/tests/unit/services/qos/test_nsxv3_notification.py vmware_nsx/tests/unit/services/qos/test_nsxv_notification.py vmware_nsx/tests/unit/services/trunk/__init__.py vmware_nsx/tests/unit/services/trunk/test_nsxv3_driver.py vmware_nsx/tests/unit/services/vpnaas/__init__.py vmware_nsx/tests/unit/services/vpnaas/test_nsxv3_vpnaas.py vmware_nsx/tests/unit/services/vpnaas/test_nsxv_vpnaas.py vmware_nsx/tests/unit/shell/__init__.py vmware_nsx/tests/unit/shell/test_admin_utils.pyvmware-nsx-12.0.1/test-requirements.txt0000666000175100017510000000156413244523345020204 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD flake8-import-order==0.12 # LGPLv3 mock>=2.0.0 # BSD psycopg2>=2.6.2 # LGPL/ZPL PyMySQL>=0.7.6 # MIT License oslotest>=3.2.0 # Apache-2.0 oslo.privsep>=1.23.0 # Apache-2.0 pyroute2>=0.4.21;sys_platform!='win32' # Apache-2.0 (+ dual licensed GPL2) testrepository>=0.0.18 # Apache-2.0/BSD testresources>=2.0.0 # Apache-2.0/BSD testtools>=2.2.0 # MIT testscenarios>=0.4 # Apache-2.0/BSD WebTest>=2.0.27 # MIT bandit>=1.1.0 # Apache-2.0 tempest>=17.1.0 # Apache-2.0 pylint==1.4.5 # GPLv2 python-openstackclient>=3.12.0 # Apache-2.0 requests-mock>=1.1.0 # Apache-2.0 vmware-nsx-12.0.1/setup.py0000666000175100017510000000200613244523345015445 0ustar zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. 
# solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) vmware-nsx-12.0.1/vmware_nsx/0000775000175100017510000000000013244524600016117 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/api_replay/0000775000175100017510000000000013244524600020244 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/api_replay/utils.py0000666000175100017510000000336213244523413021765 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import attributes as lib_attrs from oslo_config import cfg from oslo_utils import uuidutils import webob.exc def _fixup_res_dict(context, attr_name, res_dict, check_allow_post=True): # This method is a replacement of _fixup_res_dict which is used in # neutron.plugin.common.utils. All this mock does is insert a uuid # for the id field if one is not found ONLY if running in api_replay_mode. if cfg.CONF.api_replay_mode and 'id' not in res_dict: res_dict['id'] = uuidutils.generate_uuid() attr_info = lib_attrs.RESOURCES[attr_name] attr_ops = lib_attrs.AttributeInfo(attr_info) try: attr_ops.populate_project_id(context, res_dict, True) lib_attrs.populate_project_info(attr_info) attr_ops.verify_attributes(res_dict) except webob.exc.HTTPBadRequest as e: # convert webob exception into ValueError as these functions are # for internal use. 
webob exception doesn't make sense. raise ValueError(e.detail) attr_ops.fill_post_defaults(res_dict, check_allow_post=check_allow_post) attr_ops.convert_values(res_dict) return res_dict vmware-nsx-12.0.1/vmware_nsx/api_replay/client.py0000666000175100017510000010133113244523413022076 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import six from keystoneauth1 import identity from keystoneauth1 import session from neutronclient.common import exceptions as n_exc from neutronclient.v2_0 import client from oslo_utils import excutils logging.basicConfig(level=logging.INFO) LOG = logging.getLogger(__name__) # For internal testing only use_old_keystone_on_dest = False class ApiReplayClient(object): basic_ignore_fields = ['updated_at', 'created_at', 'tags', 'revision', 'revision_number'] def __init__(self, source_os_username, source_os_user_domain_id, source_os_tenant_name, source_os_tenant_domain_id, source_os_password, source_os_auth_url, dest_os_username, dest_os_user_domain_id, dest_os_tenant_name, dest_os_tenant_domain_id, dest_os_password, dest_os_auth_url, use_old_keystone, logfile): if logfile: f_handler = logging.FileHandler(logfile) f_formatter = logging.Formatter( '%(asctime)s %(levelname)s %(message)s') f_handler.setFormatter(f_formatter) LOG.addHandler(f_handler) # connect to both clients if use_old_keystone: # Since we are not sure what keystone version will be used on the # source setup, we add an option to use the v2 client 
self.source_neutron = client.Client( username=source_os_username, tenant_name=source_os_tenant_name, password=source_os_password, auth_url=source_os_auth_url) else: self.source_neutron = self.connect_to_client( username=source_os_username, user_domain_id=source_os_user_domain_id, tenant_name=source_os_tenant_name, tenant_domain_id=source_os_tenant_domain_id, password=source_os_password, auth_url=source_os_auth_url) if use_old_keystone_on_dest: self.dest_neutron = client.Client( username=dest_os_username, tenant_name=dest_os_tenant_name, password=dest_os_password, auth_url=dest_os_auth_url) else: self.dest_neutron = self.connect_to_client( username=dest_os_username, user_domain_id=dest_os_user_domain_id, tenant_name=dest_os_tenant_name, tenant_domain_id=dest_os_tenant_domain_id, password=dest_os_password, auth_url=dest_os_auth_url) LOG.info("Starting NSX migration.") # Migrate all the objects self.migrate_security_groups() self.migrate_qos_policies() routers_routes, routers_gw_info = self.migrate_routers() self.migrate_networks_subnets_ports(routers_gw_info) self.migrate_floatingips() self.migrate_routers_routes(routers_routes) LOG.info("NSX migration is Done.") def connect_to_client(self, username, user_domain_id, tenant_name, tenant_domain_id, password, auth_url): auth = identity.Password(username=username, user_domain_id=user_domain_id, password=password, project_name=tenant_name, project_domain_id=tenant_domain_id, auth_url=auth_url) sess = session.Session(auth=auth) neutron = client.Client(session=sess) return neutron def find_subnet_by_id(self, subnet_id, subnets): for subnet in subnets: if subnet['id'] == subnet_id: return subnet def subnet_drop_ipv6_fields_if_v4(self, body): """ Drops v6 fields on subnets that are v4 as server doesn't allow them. 
""" v6_fields_to_remove = ['ipv6_address_mode', 'ipv6_ra_mode'] if body['ip_version'] != 4: return for field in v6_fields_to_remove: if field in body: body.pop(field) def get_ports_on_network(self, network_id, ports): """Returns all the ports on a given network_id.""" ports_on_network = [] for port in ports: if port['network_id'] == network_id: ports_on_network.append(port) return ports_on_network def have_id(self, id, groups): """If the sg_id is in groups return true else false.""" for group in groups: if id == group['id']: return group return False def drop_fields(self, item, drop_fields): body = {} for k, v in item.items(): if k in drop_fields: continue body[k] = v return body def fix_description(self, body): # neutron doesn't like description being None even though its # what it returns to us. if 'description' in body and body['description'] is None: body['description'] = '' def migrate_qos_rule(self, dest_policy, source_rule): """Add the QoS rule from the source to the QoS policy If there is already a rule of that type, skip it since the QoS policy can have only one rule of each type """ #TODO(asarfaty) also take rule direction into account once #ingress support is upstream rule_type = source_rule.get('type') dest_rules = dest_policy.get('rules') if dest_rules: for dest_rule in dest_rules: if dest_rule['type'] == rule_type: return pol_id = dest_policy['id'] drop_qos_rule_fields = ['revision', 'type', 'qos_policy_id', 'id'] body = self.drop_fields(source_rule, drop_qos_rule_fields) try: if rule_type == 'bandwidth_limit': rule = self.dest_neutron.create_bandwidth_limit_rule( pol_id, body={'bandwidth_limit_rule': body}) elif rule_type == 'dscp_marking': rule = self.dest_neutron.create_dscp_marking_rule( pol_id, body={'dscp_marking_rule': body}) else: LOG.info("QoS rule type %(rule)s is not supported for policy " "%(pol)s", {'rule': rule_type, 'pol': pol_id}) LOG.info("created QoS policy %s rule %s", pol_id, rule) except Exception as e: LOG.error("Failed to create 
QoS rule for policy %(pol)s: %(e)s", {'pol': pol_id, 'e': e}) def migrate_qos_policies(self): """Migrates QoS policies from source to dest neutron.""" # first fetch the QoS policies from both the # source and destination neutron server try: dest_qos_pols = self.dest_neutron.list_qos_policies()['policies'] except n_exc.NotFound: # QoS disabled on dest LOG.info("QoS is disabled on destination: ignoring QoS policies") self.dest_qos_support = False return self.dest_qos_support = True try: source_qos_pols = self.source_neutron.list_qos_policies()[ 'policies'] except n_exc.NotFound: # QoS disabled on source return drop_qos_policy_fields = ['revision'] for pol in source_qos_pols: dest_pol = self.have_id(pol['id'], dest_qos_pols) # If the policy already exists on the dest_neutron if dest_pol: # make sure all the QoS policy rules are there and # create them if not for qos_rule in pol['rules']: self.migrate_qos_rule(dest_pol, qos_rule) # dest server doesn't have the group so we create it here. else: qos_rules = pol.pop('rules') try: body = self.drop_fields(pol, drop_qos_policy_fields) self.fix_description(body) new_pol = self.dest_neutron.create_qos_policy( body={'policy': body}) except Exception as e: LOG.error("Failed to create QoS policy %(pol)s: %(e)s", {'pol': pol['id'], 'e': e}) continue else: LOG.info("Created QoS policy %s", new_pol) for qos_rule in qos_rules: self.migrate_qos_rule(new_pol['policy'], qos_rule) def migrate_security_groups(self): """Migrates security groups from source to dest neutron.""" # first fetch the security groups from both the # source and dest neutron server source_sec_groups = self.source_neutron.list_security_groups() dest_sec_groups = self.dest_neutron.list_security_groups() source_sec_groups = source_sec_groups['security_groups'] dest_sec_groups = dest_sec_groups['security_groups'] drop_sg_fields = self.basic_ignore_fields + ['policy'] total_num = len(source_sec_groups) LOG.info("Migrating %s security groups", total_num) for count, sg in 
enumerate(source_sec_groups, 1): dest_sec_group = self.have_id(sg['id'], dest_sec_groups) # If the security group already exists on the dest_neutron if dest_sec_group: # make sure all the security group rules are there and # create them if not for sg_rule in sg['security_group_rules']: if(self.have_id(sg_rule['id'], dest_sec_group['security_group_rules']) is False): try: body = self.drop_fields(sg_rule, drop_sg_fields) self.fix_description(body) self.dest_neutron.create_security_group_rule( {'security_group_rule': body}) except n_exc.Conflict: # NOTE(arosen): when you create a default # security group it is automatically populated # with some rules. When we go to create the rules # that already exist because of a match an error # is raised here but that's okay. pass # dest server doesn't have the group so we create it here. else: sg_rules = sg.pop('security_group_rules') try: body = self.drop_fields(sg, drop_sg_fields) self.fix_description(body) new_sg = self.dest_neutron.create_security_group( {'security_group': body}) LOG.info("Created security-group %(count)s/%(total)s: " "%(sg)s", {'count': count, 'total': total_num, 'sg': new_sg}) except Exception as e: LOG.error("Failed to create security group (%(sg)s): " "%(e)s", {'sg': sg, 'e': e}) # Note - policy security groups will have no rules, and will # be created on the destination with the default rules only for sg_rule in sg_rules: try: body = self.drop_fields(sg_rule, drop_sg_fields) self.fix_description(body) rule = self.dest_neutron.create_security_group_rule( {'security_group_rule': body}) LOG.debug("created security group rule %s", rule['id']) except Exception: # NOTE(arosen): when you create a default # security group it is automatically populated # with some rules. When we go to create the rules # that already exist because of a match an error # is raised here but that's okay. pass def migrate_routers(self): """Migrates routers from source to dest neutron. 
Also return a dictionary of the routes that should be added to each router. Static routes must be added later, after the router ports are set. And return a dictionary of external gateway info per router """ try: source_routers = self.source_neutron.list_routers()['routers'] except Exception: # L3 might be disabled in the source source_routers = [] dest_routers = self.dest_neutron.list_routers()['routers'] update_routes = {} gw_info = {} drop_router_fields = self.basic_ignore_fields + [ 'status', 'routes', 'ha', 'external_gateway_info', 'router_type', 'availability_zone_hints', 'availability_zones', 'distributed', 'flavor_id'] total_num = len(source_routers) LOG.info("Migrating %s routers", total_num) for count, router in enumerate(source_routers, 1): if router.get('routes'): update_routes[router['id']] = router['routes'] if router.get('external_gateway_info'): gw_info[router['id']] = router['external_gateway_info'] dest_router = self.have_id(router['id'], dest_routers) if dest_router is False: body = self.drop_fields(router, drop_router_fields) self.fix_description(body) try: new_router = (self.dest_neutron.create_router( {'router': body})) LOG.info("created router %(count)s/%(total)s: %(rtr)s", {'count': count, 'total': total_num, 'rtr': new_router}) except Exception as e: LOG.error("Failed to create router %(rtr)s: %(e)s", {'rtr': router, 'e': e}) return update_routes, gw_info def migrate_routers_routes(self, routers_routes): """Add static routes to the created routers.""" total_num = len(routers_routes) LOG.info("Migrating %s routers routes", total_num) for count, (router_id, routes) in enumerate( six.iteritems(routers_routes), 1): try: self.dest_neutron.update_router(router_id, {'router': {'routes': routes}}) LOG.info("Added routes to router %(rtr)s %(count)s/%(total)s:", {'count': count, 'total': total_num, 'rtr': router_id}) except Exception as e: LOG.error("Failed to add routes %(routes)s to router " "%(rtr)s: %(e)s", {'routes': routes, 'rtr': router_id, 
'e': e}) def migrate_subnetpools(self): subnetpools_map = {} try: source_subnetpools = self.source_neutron.list_subnetpools()[ 'subnetpools'] except Exception: # pools not supported on source return subnetpools_map dest_subnetpools = self.dest_neutron.list_subnetpools()[ 'subnetpools'] drop_subnetpool_fields = self.basic_ignore_fields + [ 'id', 'ip_version'] for pool in source_subnetpools: # a default subnetpool (per ip-version) should be unique. # so do not create one if already exists if pool['is_default']: for dpool in dest_subnetpools: if (dpool['is_default'] and dpool['ip_version'] == pool['ip_version']): subnetpools_map[pool['id']] = dpool['id'] break else: old_id = pool['id'] body = self.drop_fields(pool, drop_subnetpool_fields) self.fix_description(body) if 'default_quota' in body and body['default_quota'] is None: del body['default_quota'] try: new_id = self.dest_neutron.create_subnetpool( {'subnetpool': body})['subnetpool']['id'] subnetpools_map[old_id] = new_id # refresh the list of existing subnetpools dest_subnetpools = self.dest_neutron.list_subnetpools()[ 'subnetpools'] except Exception as e: LOG.error("Failed to create subnetpool %(pool)s: %(e)s", {'pool': pool, 'e': e}) return subnetpools_map def fix_port(self, body): # remove allowed_address_pairs if empty: if ('allowed_address_pairs' in body and not body['allowed_address_pairs']): del body['allowed_address_pairs'] # remove port security if mac learning is enabled if (body.get('mac_learning_enabled') and body.get('port_security_enabled')): LOG.warning("Disabling port security of port %s: The plugin " "doesn't support mac learning with port security", body['id']) body['port_security_enabled'] = False body['security_groups'] = [] def fix_network(self, body, dest_default_public_net): # neutron doesn't like some fields being None even though its # what it returns to us. 
for field in ['provider:physical_network', 'provider:segmentation_id']: if field in body and body[field] is None: del body[field] # vxlan network with segmentation id should be translated to a regular # network in nsx-v3. if (body.get('provider:network_type') == 'vxlan' and body.get('provider:segmentation_id') is not None): del body['provider:network_type'] del body['provider:segmentation_id'] # flat network should be translated to a regular network in nsx-v3. if (body.get('provider:network_type') == 'flat'): del body['provider:network_type'] if 'provider:physical_network' in body: del body['provider:physical_network'] # external networks needs some special care if body.get('router:external'): fields_reset = False for field in ['provider:network_type', 'provider:segmentation_id', 'provider:physical_network']: if field in body: if body[field] is not None: fields_reset = True del body[field] if fields_reset: LOG.warning("Ignoring provider network fields while migrating " "external network %s", body['id']) if body.get('is_default') and dest_default_public_net: body['is_default'] = False LOG.warning("Public network %s was set to non default network", body['id']) def migrate_networks_subnets_ports(self, routers_gw_info): """Migrates networks/ports/router-uplinks from src to dest neutron.""" source_ports = self.source_neutron.list_ports()['ports'] source_subnets = self.source_neutron.list_subnets()['subnets'] source_networks = self.source_neutron.list_networks()['networks'] dest_networks = self.dest_neutron.list_networks()['networks'] dest_ports = self.dest_neutron.list_ports()['ports'] # Remove some fields before creating the new object. 
# Some fields are not supported for a new object, and some are not # supported by the nsx-v3 plugin drop_subnet_fields = self.basic_ignore_fields + [ 'advanced_service_providers', 'id', 'service_types'] drop_port_fields = self.basic_ignore_fields + [ 'status', 'binding:vif_details', 'binding:vif_type', 'binding:host_id', 'vnic_index', 'dns_assignment'] drop_network_fields = self.basic_ignore_fields + [ 'status', 'subnets', 'availability_zones', 'availability_zone_hints', 'ipv4_address_scope', 'ipv6_address_scope', 'mtu'] if not self.dest_qos_support: drop_network_fields.append('qos_policy_id') drop_port_fields.append('qos_policy_id') # Find out if the destination already has a default public network dest_default_public_net = False for dest_net in dest_networks: if dest_net.get('is_default') and dest_net.get('router:external'): dest_default_public_net = True subnetpools_map = self.migrate_subnetpools() total_num = len(source_networks) LOG.info("Migrating %(nets)s networks, %(subnets)s subnets and " "%(ports)s ports", {'nets': total_num, 'subnets': len(source_subnets), 'ports': len(source_ports)}) for count, network in enumerate(source_networks, 1): external_net = network.get('router:external') body = self.drop_fields(network, drop_network_fields) self.fix_description(body) self.fix_network(body, dest_default_public_net) # only create network if the dest server doesn't have it if self.have_id(network['id'], dest_networks): continue try: created_net = self.dest_neutron.create_network( {'network': body})['network'] LOG.info("Created network %(count)s/%(total)s: %(net)s", {'count': count, 'total': total_num, 'net': created_net}) except Exception as e: # Print the network and exception to help debugging with excutils.save_and_reraise_exception(): LOG.error("Failed to create network %s", body) LOG.error("Source network: %s", network) raise e subnets_map = {} dhcp_subnets = [] count_dhcp_subnet = 0 for subnet_id in network['subnets']: subnet = 
self.find_subnet_by_id(subnet_id, source_subnets) body = self.drop_fields(subnet, drop_subnet_fields) # specify the network_id that we just created above body['network_id'] = network['id'] self.subnet_drop_ipv6_fields_if_v4(body) self.fix_description(body) # translate the old subnetpool id to the new one if body.get('subnetpool_id'): body['subnetpool_id'] = subnetpools_map.get( body['subnetpool_id']) # Handle DHCP enabled subnets enable_dhcp = False if body['enable_dhcp']: count_dhcp_subnet = count_dhcp_subnet + 1 # disable dhcp on subnet: we will enable it after creating # all the ports to avoid ip collisions body['enable_dhcp'] = False if count_dhcp_subnet > 1: # Do not allow dhcp on the subnet if there is already # another subnet with DHCP as the v3 plugin supports # only one LOG.warning("Disabling DHCP for subnet on net %s: " "The plugin doesn't support multiple " "subnets with DHCP", network['id']) enable_dhcp = False elif external_net: # Do not allow dhcp on the external subnet LOG.warning("Disabling DHCP for subnet on net %s: " "The plugin doesn't support dhcp on " "external networks", network['id']) enable_dhcp = False else: enable_dhcp = True try: created_subnet = self.dest_neutron.create_subnet( {'subnet': body})['subnet'] LOG.info("Created subnet: %s", created_subnet['id']) subnets_map[subnet_id] = created_subnet['id'] if enable_dhcp: dhcp_subnets.append(created_subnet) except n_exc.BadRequest as e: LOG.error("Failed to create subnet: %(subnet)s: %(e)s", {'subnet': subnet, 'e': e}) # NOTE(arosen): this occurs here if you run the script # multiple times as we don't currently # preserve the subnet_id. 
Also, 409 would be a better # response code for this in neutron :( # create the ports on the network ports = self.get_ports_on_network(network['id'], source_ports) for port in ports: body = self.drop_fields(port, drop_port_fields) self.fix_description(body) self.fix_port(body) # specify the network_id that we just created above port['network_id'] = network['id'] subnet_id = None if port.get('fixed_ips'): old_subnet_id = port['fixed_ips'][0]['subnet_id'] subnet_id = subnets_map.get(old_subnet_id) # remove the old subnet id field from fixed_ips dict for fixed_ips in body['fixed_ips']: del fixed_ips['subnet_id'] # only create port if the dest server doesn't have it if self.have_id(port['id'], dest_ports) is False: if port['device_owner'] == 'network:router_gateway': router_id = port['device_id'] enable_snat = True if router_id in routers_gw_info: # keep the original snat status of the router enable_snat = routers_gw_info[router_id].get( 'enable_snat', True) rtr_body = { "external_gateway_info": {"network_id": port['network_id'], "enable_snat": enable_snat, # keep the original GW IP "external_fixed_ips": port.get('fixed_ips')}} try: self.dest_neutron.update_router( router_id, {'router': rtr_body}) LOG.info("Uplinked router %(rtr)s to external " "network %(net)s", {'rtr': router_id, 'net': port['network_id']}) except Exception as e: LOG.error("Failed to add router gateway " "(%(port)s): %(e)s", {'port': port, 'e': e}) continue # Let the neutron dhcp-agent recreate this on its own if port['device_owner'] == 'network:dhcp': continue # ignore these as we create them ourselves later if port['device_owner'] == 'network:floatingip': continue if (port['device_owner'] == 'network:router_interface' and subnet_id): try: # uplink router_interface ports by creating the # port, and attaching it to the router router_id = port['device_id'] del body['device_owner'] del body['device_id'] created_port = self.dest_neutron.create_port( {'port': body})['port'] LOG.info("Created interface 
port %(port)s (subnet " "%(subnet)s, ip %(ip)s, mac %(mac)s)", {'port': created_port['id'], 'subnet': subnet_id, 'ip': created_port['fixed_ips'][0][ 'ip_address'], 'mac': created_port['mac_address']}) self.dest_neutron.add_interface_router( router_id, {'port_id': created_port['id']}) LOG.info("Uplinked router %(rtr)s to network " "%(net)s", {'rtr': router_id, 'net': network['id']}) except Exception as e: # NOTE(arosen): this occurs here if you run the # script multiple times as we don't track this. # Note(asarfaty): also if the same network in # source is attached to 2 routers, which the v3 # plugin does not support. LOG.error("Failed to add router interface port" "(%(port)s): %(e)s", {'port': port, 'e': e}) continue try: created_port = self.dest_neutron.create_port( {'port': body})['port'] except Exception as e: # NOTE(arosen): this occurs here if you run the # script multiple times as we don't track this. LOG.error("Failed to create port (%(port)s) : %(e)s", {'port': port, 'e': e}) else: LOG.info("Created port %(port)s (subnet " "%(subnet)s, ip %(ip)s, mac %(mac)s)", {'port': created_port['id'], 'subnet': subnet_id, 'ip': created_port['fixed_ips'][0][ 'ip_address'], 'mac': created_port['mac_address']}) # Enable dhcp on the relevant subnets: for subnet in dhcp_subnets: try: self.dest_neutron.update_subnet(subnet['id'], {'subnet': {'enable_dhcp': True}}) except Exception as e: LOG.error("Failed to enable DHCP on subnet %(subnet)s: " "%(e)s", {'subnet': subnet['id'], 'e': e}) def migrate_floatingips(self): """Migrates floatingips from source to dest neutron.""" try: source_fips = self.source_neutron.list_floatingips()['floatingips'] except Exception: # L3 might be disabled in the source source_fips = [] drop_fip_fields = self.basic_ignore_fields + [ 'status', 'router_id', 'id', 'revision'] total_num = len(source_fips) for count, source_fip in enumerate(source_fips, 1): body = self.drop_fields(source_fip, drop_fip_fields) try: fip = 
self.dest_neutron.create_floatingip({'floatingip': body}) LOG.info("Created floatingip %(count)s/%(total)s : %(fip)s", {'count': count, 'total': total_num, 'fip': fip}) except Exception as e: LOG.error("Failed to create floating ip (%(fip)s) : %(e)s", {'fip': source_fip, 'e': e}) vmware-nsx-12.0.1/vmware_nsx/api_replay/__init__.py0000666000175100017510000000000013244523345022352 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/api_replay/cli.py0000666000175100017510000001112113244523345021370 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import argparse from vmware_nsx.api_replay import client DEFAULT_DOMAIN_ID = 'default' DEFAULT_LOGFILE = 'nsx_migration.log' class ApiReplayCli(object): def __init__(self): args = self._setup_argparse() client.ApiReplayClient( source_os_tenant_name=args.source_os_project_name, source_os_tenant_domain_id=args.source_os_project_domain_id, source_os_username=args.source_os_username, source_os_user_domain_id=args.source_os_user_domain_id, source_os_password=args.source_os_password, source_os_auth_url=args.source_os_auth_url, dest_os_tenant_name=args.dest_os_project_name, dest_os_tenant_domain_id=args.dest_os_project_domain_id, dest_os_username=args.dest_os_username, dest_os_user_domain_id=args.dest_os_user_domain_id, dest_os_password=args.dest_os_password, dest_os_auth_url=args.dest_os_auth_url, use_old_keystone=args.use_old_keystone, logfile=args.logfile) def _setup_argparse(self): parser = argparse.ArgumentParser() # Arguments required to connect to source # neutron which we will fetch all of the data from. parser.add_argument( "--source-os-username", required=True, help="The source os-username to use to " "gather neutron resources with.") parser.add_argument( "--source-os-user-domain-id", default=DEFAULT_DOMAIN_ID, help="The source os-user-domain-id to use to " "gather neutron resources with.") parser.add_argument( "--source-os-project-name", required=True, help="The source os-project-name to use to " "gather neutron resource with.") parser.add_argument( "--source-os-project-domain-id", default=DEFAULT_DOMAIN_ID, help="The source os-project-domain-id to use to " "gather neutron resource with.") parser.add_argument( "--source-os-password", required=True, help="The password for this user.") parser.add_argument( "--source-os-auth-url", required=True, help="They keystone api endpoint for this user.") # Arguments required to connect to the dest neutron which # we will recreate all of these resources over. 
parser.add_argument( "--dest-os-username", required=True, help="The dest os-username to use to" "gather neutron resources with.") parser.add_argument( "--dest-os-user-domain-id", default=DEFAULT_DOMAIN_ID, help="The dest os-user-domain-id to use to" "gather neutron resources with.") parser.add_argument( "--dest-os-project-name", required=True, help="The dest os-project-name to use to " "gather neutron resource with.") parser.add_argument( "--dest-os-project-domain-id", default=DEFAULT_DOMAIN_ID, help="The dest os-project-domain-id to use to " "gather neutron resource with.") parser.add_argument( "--dest-os-password", required=True, help="The password for this user.") parser.add_argument( "--dest-os-auth-url", required=True, help="They keystone api endpoint for this user.") parser.add_argument( "--use-old-keystone", default=False, action='store_true', help="Use old keystone client for source authentication.") parser.add_argument( "--logfile", default=DEFAULT_LOGFILE, help="Output logfile.") # NOTE: this will return an error message if any of the # require options are missing. return parser.parse_args() def main(): ApiReplayCli() vmware-nsx-12.0.1/vmware_nsx/osc/0000775000175100017510000000000013244524600016703 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/osc/v2/0000775000175100017510000000000013244524600017232 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/osc/v2/security_group.py0000666000175100017510000001267313244523345022707 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Security group action implementations with nsx extensions""" from osc_lib import utils as osc_utils from openstackclient.identity import common as identity_common from openstackclient.network.v2 import security_group from vmware_nsx._i18n import _ from vmware_nsx.osc.v2 import utils def add_nsx_extensions_to_parser(parser, client_manager, for_create=True): if 'security-group-logging' in utils.get_extensions(client_manager): # logging logging_enable_group = parser.add_mutually_exclusive_group() logging_enable_group.add_argument( '--logging', action='store_true', help=_("Enable logging") ) logging_enable_group.add_argument( '--no-logging', action='store_true', help=_("Disable logging (default)") ) if ('provider-security-group' in utils.get_extensions(client_manager) and for_create): # provider parser.add_argument( '--provider', action='store_true', help=_("Provider security group") ) if 'security-group-policy' in utils.get_extensions(client_manager): # policy parser.add_argument( '--policy', metavar='', help=_("NSX Policy Id") ) def _get_plugin_attrs(attrs, parsed_args, client_manager): if 'security-group-logging' in utils.get_extensions(client_manager): # logging if parsed_args.logging: attrs['logging'] = True if parsed_args.no_logging: attrs['logging'] = False if 'provider-security-group' in utils.get_extensions(client_manager): # provider if hasattr(parsed_args, 'provider') and parsed_args.provider: attrs['provider'] = True if 'security-group-policy' in utils.get_extensions(client_manager): # policy if parsed_args.policy is not None: attrs['policy'] = parsed_args.policy return attrs class NsxCreateSecurityGroup(security_group.CreateSecurityGroup): """Create a new security_group with vmware nsx extensions """ def take_action_network(self, client, parsed_args): #TODO(asarfaty): Better to change the neutron client code of # CreateSecurityGroup:take_action_network 
to use an internal # get_attributes, and override only this # Build the create attributes. attrs = {} attrs['name'] = parsed_args.name attrs['description'] = self._get_description(parsed_args) if parsed_args.project is not None: identity_client = self.app.client_manager.identity project_id = identity_common.find_project( identity_client, parsed_args.project, parsed_args.project_domain, ).id attrs['tenant_id'] = project_id # add the plugin attributes attrs = _get_plugin_attrs(attrs, parsed_args, self.app.client_manager) # Create the security group and display the results. obj = client.create_security_group(**attrs) display_columns, property_columns = security_group._get_columns(obj) data = osc_utils.get_item_properties( obj, property_columns, formatters=security_group._formatters_network ) return (display_columns, data) def update_parser_common(self, parser): parser = super(NsxCreateSecurityGroup, self).update_parser_common( parser) # Add the nsx attributes to the neutron security group attributes add_nsx_extensions_to_parser( parser, self.app.client_manager, for_create=True) return parser class NsxSetSecurityGroup(security_group.SetSecurityGroup): """Set security group properties with vmware nsx extensions """ def take_action_network(self, client, parsed_args): #TODO(asarfaty): Better to change the neutron client code of # CreateSecurityGroup:take_action_network to use an internal # get_attributes, and override only this obj = client.find_security_group(parsed_args.group, ignore_missing=False) attrs = {} if parsed_args.name is not None: attrs['name'] = parsed_args.name if parsed_args.description is not None: attrs['description'] = parsed_args.description # add the plugin attributes attrs = _get_plugin_attrs(attrs, parsed_args, self.app.client_manager) client.update_security_group(obj, **attrs) def update_parser_common(self, parser): parser = super(NsxSetSecurityGroup, self).update_parser_common(parser) # Add the nsx attributes to the neutron security group 
attributes add_nsx_extensions_to_parser( parser, self.app.client_manager, for_create=False) return parser vmware-nsx-12.0.1/vmware_nsx/osc/v2/utils.py0000666000175100017510000000241113244523345020751 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from osc_lib import utils as osc_utils cached_extensions = None def get_extensions(client_manager): """Return a list of all current extensions aliases""" # Return previously calculated results global cached_extensions if cached_extensions is not None: return cached_extensions # Get supported extensions from the manager data = client_manager.network.extensions() extensions = [] for s in data: prop = osc_utils.get_item_properties( s, ('Alias',), formatters={}) extensions.append(prop[0]) # Save the results in the global cache cached_extensions = extensions return extensions vmware-nsx-12.0.1/vmware_nsx/osc/v2/subnet.py0000666000175100017510000000544613244523345021124 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Subnet extensions action implementations""" from openstackclient.network.v2 import subnet from vmware_nsx._i18n import _ from vmware_nsx.osc.v2 import utils def add_nsx_extensions_to_parser(parser, client_manager): if 'dhcp-mtu' in utils.get_extensions(client_manager): # DHCP MTU parser.add_argument( '--dhcp-mtu', type=int, metavar='', help=_("DHCP MTU") ) if 'dns-search-domain' in utils.get_extensions(client_manager): # DNS search domain parser.add_argument( '--dns-search-domain', metavar='', help=_("DNS search Domain") ) # overriding the subnet module global method, to add the nsx extensions super_get_attrs = subnet._get_attrs def _get_plugin_attrs(client_manager, parsed_args, is_create=True): attrs = super_get_attrs(client_manager, parsed_args, is_create) if 'dhcp-mtu' in utils.get_extensions(client_manager): # DHCP MTU if parsed_args.dhcp_mtu is not None: attrs['dhcp_mtu'] = int(parsed_args.dhcp_mtu) parsed_args.dhcp_mtu = None if 'dns-search-domain' in utils.get_extensions(client_manager): # DNS search domain if parsed_args.dns_search_domain is not None: attrs['dns_search_domain'] = parsed_args.dns_search_domain parsed_args.dns_search_domain = None return attrs subnet._get_attrs = _get_plugin_attrs class NsxCreateSubnet(subnet.CreateSubnet): """Create a new subnet with vmware nsx extensions """ def get_parser(self, prog_name): # Add the nsx attributes to the neutron subnet attributes parser = super(NsxCreateSubnet, self).get_parser(prog_name) add_nsx_extensions_to_parser(parser, self.app.client_manager) return parser class NsxSetSubnet(subnet.SetSubnet): 
"""Set subnet properties with vmware nsx extensions """ def get_parser(self, prog_name): # Add the nsx attributes to the neutron subnet attributes parser = super(NsxSetSubnet, self).get_parser(prog_name) add_nsx_extensions_to_parser(parser, self.app.client_manager) return parser vmware-nsx-12.0.1/vmware_nsx/osc/v2/__init__.py0000666000175100017510000000000013244523345021340 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/osc/v2/router.py0000666000175100017510000000565113244523345021142 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Router action implementations with nsx extensions""" from openstackclient.network.v2 import router from vmware_nsx._i18n import _ from vmware_nsx.extensions import routersize from vmware_nsx.extensions import routertype from vmware_nsx.osc.v2 import utils def add_nsx_extensions_to_parser(parser, client_manager): if 'nsxv-router-size' in utils.get_extensions(client_manager): # router-size parser.add_argument( '--router-size', metavar='', choices=routersize.VALID_EDGE_SIZES, help=_("Router Size") ) if 'nsxv-router-type' in utils.get_extensions(client_manager): # router-type parser.add_argument( '--router-type', metavar='', choices=routertype.VALID_TYPES, help=_("Router Type") ) # overriding the router module global method, to add the nsx extensions super_get_attrs = router._get_attrs def _get_plugin_attrs(client_manager, parsed_args): attrs = super_get_attrs(client_manager, parsed_args) if 'nsxv-router-type' in utils.get_extensions(client_manager): # Router type if parsed_args.router_type is not None: attrs['router_type'] = parsed_args.router_type parsed_args.router_type = None if 'nsxv-router-size' in utils.get_extensions(client_manager): # Router size if parsed_args.router_size is not None: attrs['router_size'] = parsed_args.router_size parsed_args.router_size = None return attrs router._get_attrs = _get_plugin_attrs class NsxCreateRouter(router.CreateRouter): """Create a new router with vmware nsx extensions """ def get_parser(self, prog_name): # Add the nsx attributes to the neutron router attributes parser = super(NsxCreateRouter, self).get_parser(prog_name) add_nsx_extensions_to_parser(parser, self.app.client_manager) return parser class NsxSetRouter(router.SetRouter): """Set router properties with vmware nsx extensions """ def get_parser(self, prog_name): # Add the nsx attributes to the neutron router attributes parser = super(NsxSetRouter, self).get_parser(prog_name) add_nsx_extensions_to_parser(parser, self.app.client_manager) return parser 
vmware-nsx-12.0.1/vmware_nsx/osc/v2/project_plugin_map.py0000666000175100017510000000744513244523413023502 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Project Plugin mapping action implementations""" from openstack.network import network_service from openstack import resource2 as resource from openstackclient.i18n import _ from osc_lib.command import command from osc_lib import utils project_plugin_maps_path = "/project-plugin-maps" class ProjectPluginMap(resource.Resource): resource_key = 'project_plugin_map' resources_key = 'project_plugin_maps' base_path = '/project-plugin-maps' service = network_service.NetworkService() # capabilities allow_create = True allow_get = True allow_update = False allow_delete = False allow_list = True _query_mapping = resource.QueryParameters( 'plugin', 'project', 'tenant_id') # Properties id = resource.Body('id') project = resource.Body('project') plugin = resource.Body('plugin') tenant_id = resource.Body('tenant_id') def _get_columns(item): columns = ['project', 'plugin'] return columns, columns def _get_attrs(parsed_args): attrs = {} if parsed_args.project is not None: attrs['project'] = parsed_args.project if parsed_args.plugin is not None: attrs['plugin'] = parsed_args.plugin return attrs class CreateProjectPluginMap(command.ShowOne): _description = _("Create project plugin map") def get_parser(self, prog_name): parser = super(CreateProjectPluginMap, self).get_parser(prog_name) parser.add_argument( 
'project', metavar="", help=_("project") ) parser.add_argument( '--plugin', metavar="", required=True, help=_('Plugin.)') ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network attrs = _get_attrs(parsed_args) obj = client._create(ProjectPluginMap, **attrs) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters={}) return (display_columns, data) class ListProjectPluginMap(command.Lister): _description = _("List project plugin mappings") def take_action(self, parsed_args): client = self.app.client_manager.network columns = ( 'project', 'plugin' ) column_headers = ( 'Project ID', 'Plugin', ) client = self.app.client_manager.network data = client._list(ProjectPluginMap) return (column_headers, (utils.get_item_properties( s, columns, ) for s in data)) class ShowProjectPluginMap(command.ShowOne): _description = _("Display project plugins mapping") def get_parser(self, prog_name): parser = super(ShowProjectPluginMap, self).get_parser(prog_name) parser.add_argument( 'id', metavar='', help=_('id') ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network obj = client._get(ProjectPluginMap, parsed_args.id) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) return display_columns, data vmware-nsx-12.0.1/vmware_nsx/osc/v2/port.py0000666000175100017510000001047013244523345020601 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Port action implementations with nsx extensions""" from openstackclient.network.v2 import port from osc_lib import utils as osc_utils from vmware_nsx._i18n import _ from vmware_nsx.osc.v2 import utils def add_nsx_extensions_to_parser(parser, client_manager, is_create=True): allowed_extensions = utils.get_extensions(client_manager) # Provider security group (only for create action) if (is_create and 'provider-security-group' in allowed_extensions): parser.add_argument( '--provider-security-group', metavar='', action='append', dest='provider_security_groups', help=_("Provider Security group to associate with this port " "(name or ID) " "(repeat option to set multiple security groups)") ) if 'vnic-index' in allowed_extensions: # vnic index parser.add_argument( '--vnic-index', type=int, metavar='', help=_("Vnic index") ) if 'mac-learning' in allowed_extensions: # mac-learning-enabled mac_learning_group = parser.add_mutually_exclusive_group() mac_learning_group.add_argument( '--enable-mac-learning', action='store_true', help=_("Enable MAC learning") ) mac_learning_group.add_argument( '--disable-mac-learning', action='store_true', help=_("Disable MAC learning (Default") ) # overriding the port module global method, to add the nsx extensions super_get_attrs = port._get_attrs def _get_plugin_attrs(client_manager, parsed_args): allowed_extensions = utils.get_extensions(client_manager) attrs = super_get_attrs(client_manager, parsed_args) # Provider security groups if 'provider-security-group' in allowed_extensions: if (hasattr(parsed_args, 'provider_security_groups') and parsed_args.provider_security_groups is not None): attrs['provider_security_groups'] = [ client_manager.network.find_security_group( sg, ignore_missing=False).id for sg in parsed_args.provider_security_groups] if 'vnic-index' in allowed_extensions: # Vnic index if parsed_args.vnic_index is not None: 
attrs['vnic_index'] = parsed_args.vnic_index parsed_args.vnic_index = None if 'mac-learning' in allowed_extensions: # mac-learning-enabled if parsed_args.enable_mac_learning: attrs['mac_learning_enabled'] = True if parsed_args.disable_mac_learning: attrs['mac_learning_enabled'] = False return attrs port._get_attrs = _get_plugin_attrs # Update the port module global _formatters, to format provider security # groups too port._formatters['provider_security_groups'] = osc_utils.format_list class NsxCreatePort(port.CreatePort): """Create a new port with vmware nsx extensions """ def get_parser(self, prog_name): # Add the nsx attributes to the neutron port attributes parser = super(NsxCreatePort, self).get_parser(prog_name) add_nsx_extensions_to_parser(parser, self.app.client_manager, is_create=True) return parser class NsxSetPort(port.SetPort): """Set port properties with vmware nsx extensions """ def get_parser(self, prog_name): # Add the nsx attributes to the neutron port attributes parser = super(NsxSetPort, self).get_parser(prog_name) add_nsx_extensions_to_parser(parser, self.app.client_manager, is_create=False) return parser vmware-nsx-12.0.1/vmware_nsx/osc/__init__.py0000666000175100017510000000000013244523345021011 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/osc/plugin.py0000666000175100017510000000301713244523345020563 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from osc_lib import utils from oslo_log import log as logging LOG = logging.getLogger(__name__) DEFAULT_API_VERSION = '2' API_VERSION_OPTION = 'vmware_nsx_api_version' API_NAME = 'nsxclient' API_VERSIONS = { '2.0': 'nsxclient.v2_0.client.Client', '2': 'nsxclient.v2_0.client.Client', } def make_client(instance): """Returns a client.""" nsxclient = utils.get_client_class( API_NAME, instance._api_version[API_NAME], API_VERSIONS) LOG.debug('Instantiating vmware nsx client: %s', nsxclient) client = nsxclient(session=instance.session, region_name=instance._region_name, endpoint_type=instance._interface, insecure=instance._insecure, ca_cert=instance._cacert) return client def build_option_parser(parser): """Hook to add global options""" return parser vmware-nsx-12.0.1/vmware_nsx/nsx_cluster.py0000666000175100017510000001013213244523345021046 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging import six from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions LOG = logging.getLogger(__name__) DEFAULT_PORT = 443 # Raise if one of those attributes is not specified REQUIRED_ATTRIBUTES = ['default_tz_uuid', 'nsx_user', 'nsx_password', 'nsx_controllers'] # Emit an INFO log if one of those attributes is not specified IMPORTANT_ATTRIBUTES = ['default_l3_gw_service_uuid'] # Deprecated attributes DEPRECATED_ATTRIBUTES = ['metadata_dhcp_host_route', 'nvp_user', 'nvp_password', 'nvp_controllers'] class NSXCluster(object): """NSX cluster class. Encapsulates controller connections and the API client for a NSX cluster. Controller-specific parameters, such as timeouts are stored in the elements of the controllers attribute, which are dicts. """ def __init__(self, **kwargs): self._required_attributes = REQUIRED_ATTRIBUTES[:] self._important_attributes = IMPORTANT_ATTRIBUTES[:] self._deprecated_attributes = {} self._sanity_check(kwargs) for opt, val in six.iteritems(self._deprecated_attributes): LOG.deprecated(_("Attribute '%s' has been deprecated or moved " "to a new section. 
See new configuration file " "for details."), opt) depr_func = getattr(self, '_process_%s' % opt, None) if depr_func: depr_func(val) # If everything went according to plan these two lists should be empty if self._required_attributes: raise exceptions.InvalidClusterConfiguration( invalid_attrs=self._required_attributes) if self._important_attributes: LOG.info("The following cluster attributes were " "not specified: %s'", self._important_attributes) # The API client will be explicitly created by users of this class self.api_client = None def _sanity_check(self, options): # Iterating this way ensures the conf parameters also # define the structure of this class for arg in cfg.CONF: if arg not in DEPRECATED_ATTRIBUTES: setattr(self, arg, options.get(arg, cfg.CONF.get(arg))) self._process_attribute(arg) elif options.get(arg) is not None: # Process deprecated attributes only if specified self._deprecated_attributes[arg] = options.get(arg) def _process_attribute(self, attribute): # Process the attribute only if it's not empty! if getattr(self, attribute, None): if attribute in self._required_attributes: self._required_attributes.remove(attribute) if attribute in self._important_attributes: self._important_attributes.remove(attribute) handler_func = getattr(self, '_process_%s' % attribute, None) if handler_func: handler_func() def _process_nsx_controllers(self): # If this raises something is not right, so let it bubble up # TODO(salvatore-orlando): Also validate attribute here for i, ctrl in enumerate(self.nsx_controllers or []): if len(ctrl.split(':')) == 1: self.nsx_controllers[i] = '%s:%s' % (ctrl, DEFAULT_PORT) def _process_nvp_controllers(self): self.nsx_controllers = self.nvp_controllers self._process_nsx_controllers() vmware-nsx-12.0.1/vmware_nsx/version.py0000666000175100017510000000125113244523345020164 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('vmware-nsx') vmware-nsx-12.0.1/vmware_nsx/services/0000775000175100017510000000000013244524600017742 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/0000775000175100017510000000000013244524600021232 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsxv/0000775000175100017510000000000013244524600022230 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsxv/ipsec_driver.py0000666000175100017510000004036613244523345025300 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from neutron_lib.plugins import directory from neutron_vpnaas.services.vpn import service_drivers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vcns_exc from vmware_nsx.services.vpnaas.nsxv import ipsec_validator LOG = logging.getLogger(__name__) IPSEC = 'ipsec' class NSXvIPsecVpnDriver(service_drivers.VpnDriver): def __init__(self, service_plugin): self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) self._vcns = self._core_plugin.nsx_v.vcns validator = ipsec_validator.IPsecValidator(service_plugin) super(NSXvIPsecVpnDriver, self).__init__(service_plugin, validator) @property def l3_plugin(self): return self._core_plugin @property def service_type(self): return IPSEC def _get_router_edge_id(self, context, vpnservice_id): vpnservice = self.service_plugin._get_vpnservice(context, vpnservice_id) router_id = vpnservice['router_id'] edge_binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if not edge_binding: msg = _("Couldn't find edge binding for router %s") % router_id raise nsxv_exc.NsxPluginException(err_msg=msg) if edge_binding['edge_type'] == nsxv_constants.VDR_EDGE: edge_manager = self._core_plugin.edge_manager router_id = edge_manager.get_plr_by_tlr_id(context, router_id) binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) edge_id = binding['edge_id'] else: # Get exclusive edge id edge_id = edge_binding['edge_id'] return router_id, edge_id def _convert_ipsec_conn(self, context, ipsec_site_connection): ipsec_id = ipsec_site_connection['ipsecpolicy_id'] 
vpnservice_id = ipsec_site_connection['vpnservice_id'] ipsecpolicy = self.service_plugin.get_ipsecpolicy(context, ipsec_id) vpnservice = self.service_plugin._get_vpnservice(context, vpnservice_id) local_cidr = vpnservice['subnet']['cidr'] router_id = vpnservice['router_id'] router = self._core_plugin.get_router(context, router_id) local_addr = (router['external_gateway_info']['external_fixed_ips'] [0]["ip_address"]) encrypt = nsxv_constants.ENCRYPTION_ALGORITHM_MAP.get( ipsecpolicy.get('encryption_algorithm')) site = { 'enabled': True, 'enablePfs': True, 'dhGroup': nsxv_constants.PFS_MAP.get(ipsecpolicy.get('pfs')), 'name': ipsec_site_connection.get('name'), 'description': ipsec_site_connection.get('description'), 'localId': local_addr, 'localIp': local_addr, 'peerId': ipsec_site_connection['peer_id'], 'peerIp': ipsec_site_connection.get('peer_address'), 'localSubnets': { 'subnets': [local_cidr]}, 'peerSubnets': { 'subnets': ipsec_site_connection.get('peer_cidrs')}, 'authenticationMode': ipsec_site_connection.get('auth_mode'), 'psk': ipsec_site_connection.get('psk'), 'encryptionAlgorithm': encrypt } return site def _generate_new_sites(self, edge_id, ipsec_site_conn): # Fetch the previous ipsec vpn configuration ipsecvpn_configs = self._get_ipsec_config(edge_id) vse_sites = [] if ipsecvpn_configs[1]['enabled']: vse_sites = ([site for site in ipsecvpn_configs[1]['sites']['sites']]) vse_sites.append(ipsec_site_conn) return vse_sites def _generate_ipsecvpn_firewall_rules(self, plugin_type, context, edge_id=None): ipsecvpn_configs = self._get_ipsec_config(edge_id) ipsec_vpn_fw_rules = [] if ipsecvpn_configs[1]['enabled']: for site in ipsecvpn_configs[1]['sites']['sites']: peer_subnets = site['peerSubnets']['subnets'] local_subnets = site['localSubnets']['subnets'] ipsec_vpn_fw_rules.append({ 'name': 'VPN ' + site.get('name', 'rule'), 'action': 'allow', 'enabled': True, 'source_ip_address': peer_subnets, 'destination_ip_address': local_subnets}) return ipsec_vpn_fw_rules 
def _update_firewall_rules(self, context, vpnservice_id): vpnservice = self.service_plugin._get_vpnservice(context, vpnservice_id) router_db = ( self._core_plugin._get_router(context, vpnservice['router_id'])) self._core_plugin._update_subnets_and_dnat_firewall(context, router_db) def _update_status(self, context, vpn_service_id, ipsec_site_conn_id, status, updated_pending_status=True): status_list = [] vpn_status = {} ipsec_site_conn = {} vpn_status['id'] = vpn_service_id vpn_status['updated_pending_status'] = updated_pending_status vpn_status['status'] = status ipsec_site_conn['status'] = status ipsec_site_conn['updated_pending_status'] = updated_pending_status vpn_status['ipsec_site_connections'] = {ipsec_site_conn_id: ipsec_site_conn} status_list.append(vpn_status) self.service_plugin.update_status_by_agent(context, status_list) def create_ipsec_site_connection(self, context, ipsec_site_connection): LOG.debug('Creating ipsec site connection %(conn_info)s.', {"conn_info": ipsec_site_connection}) new_ipsec = self._convert_ipsec_conn(context, ipsec_site_connection) vpnservice_id = ipsec_site_connection['vpnservice_id'] edge_id = self._get_router_edge_id(context, vpnservice_id)[1] with locking.LockManager.get_lock(edge_id): vse_sites = self._generate_new_sites(edge_id, new_ipsec) ipsec_id = ipsec_site_connection["id"] try: LOG.debug('Updating ipsec vpn configuration %(vse_sites)s.', {'vse_sites': vse_sites}) self._update_ipsec_config(edge_id, vse_sites, enabled=True) except vcns_exc.VcnsApiException: self._update_status(context, vpnservice_id, ipsec_id, "ERROR") msg = (_("Failed to create ipsec site connection " "configuration with %(edge_id)s.") % {'edge_id': edge_id}) raise nsxv_exc.NsxPluginException(err_msg=msg) LOG.debug('Updating ipsec vpn firewall') try: self._update_firewall_rules(context, vpnservice_id) except vcns_exc.VcnsApiException: self._update_status(context, vpnservice_id, ipsec_id, "ERROR") msg = (_("Failed to update firewall rule for ipsec vpn " 
"with %(edge_id)s.") % {'edge_id': edge_id}) raise nsxv_exc.NsxPluginException(err_msg=msg) self._update_status(context, vpnservice_id, ipsec_id, "ACTIVE") def _get_ipsec_config(self, edge_id): return self._vcns.get_ipsec_config(edge_id) def delete_ipsec_site_connection(self, context, ipsec_site_conn): LOG.debug('Deleting ipsec site connection %(site)s.', {"site": ipsec_site_conn}) ipsec_id = ipsec_site_conn['id'] edge_id = self._get_router_edge_id(context, ipsec_site_conn['vpnservice_id'])[1] with locking.LockManager.get_lock(edge_id): del_site, vse_sites = self._find_vse_site(context, edge_id, ipsec_site_conn) if not del_site: LOG.error("Failed to find ipsec_site_connection " "%(ipsec_site_conn)s with %(edge_id)s.", {'ipsec_site_conn': ipsec_site_conn, 'edge_id': edge_id}) raise nsxv_exc.NsxIPsecVpnMappingNotFound(conn=ipsec_id) vse_sites.remove(del_site) enabled = True if vse_sites else False try: self._update_ipsec_config(edge_id, vse_sites, enabled) except vcns_exc.VcnsApiException: msg = (_("Failed to delete ipsec site connection " "configuration with edge_id: %(edge_id)s.") % {'egde_id': edge_id}) raise nsxv_exc.NsxPluginException(err_msg=msg) try: self._update_firewall_rules(context, ipsec_site_conn['vpnservice_id']) except vcns_exc.VcnsApiException: msg = _("Failed to update firewall rule for ipsec vpn with " "%(edge_id)s.") % {'edge_id': edge_id} raise nsxv_exc.NsxPluginException(err_msg=msg) def _find_vse_site(self, context, edge_id, site): # Fetch the previous ipsec vpn configuration ipsecvpn_configs = self._get_ipsec_config(edge_id)[1] vpnservice = self.service_plugin._get_vpnservice(context, site['vpnservice_id']) local_cidr = vpnservice['subnet']['cidr'] old_site = None vse_sites = None if ipsecvpn_configs['enabled']: vse_sites = ipsecvpn_configs['sites'].get('sites') for s in vse_sites: if ((s['peerSubnets'].get('subnets') == site['peer_cidrs']) and (s['localSubnets'].get('subnets')[0] == local_cidr)): old_site = s break return old_site, vse_sites 
def _update_site_dict(self, context, edge_id, site, ipsec_site_connection): # Fetch the previous ipsec vpn configuration old_site, vse_sites = self._find_vse_site(context, edge_id, site) if old_site: vse_sites.remove(old_site) if 'peer_addresses' in ipsec_site_connection: old_site['peerIp'] = ipsec_site_connection['peer_address'] if 'peer_cidrs' in ipsec_site_connection: old_site['peerSubnets']['subnets'] = (ipsec_site_connection ['peer_cidrs']) vse_sites.append(old_site) return vse_sites def update_ipsec_site_connection(self, context, old_ipsec_conn, ipsec_site_connection): LOG.debug('Updating ipsec site connection %(site)s.', {"site": ipsec_site_connection}) vpnservice_id = old_ipsec_conn['vpnservice_id'] ipsec_id = old_ipsec_conn['id'] edge_id = self._get_router_edge_id(context, vpnservice_id)[1] with locking.LockManager.get_lock(edge_id): vse_sites = self._update_site_dict(context, edge_id, old_ipsec_conn, ipsec_site_connection) if not vse_sites: self._update_status(context, vpnservice_id, ipsec_id, "ERROR") LOG.error("Failed to find ipsec_site_connection " "%(ipsec_site_conn)s with %(edge_id)s.", {'ipsec_site_conn': ipsec_site_connection, 'edge_id': edge_id}) raise nsxv_exc.NsxIPsecVpnMappingNotFound(conn=ipsec_id) try: LOG.debug('Updating ipsec vpn configuration %(vse_sites)s.', {'vse_sites': vse_sites}) self._update_ipsec_config(edge_id, vse_sites) except vcns_exc.VcnsApiException: self._update_status(context, vpnservice_id, ipsec_id, "ERROR") msg = (_("Failed to create ipsec site connection " "configuration with %(edge_id)s.") % {'edge_id': edge_id}) raise nsxv_exc.NsxPluginException(err_msg=msg) if 'peer_cidrs' in ipsec_site_connection: # Update firewall old_ipsec_conn['peer_cidrs'] = ( ipsec_site_connection['peer_cidrs']) try: self._update_firewall_rules(context, vpnservice_id) except vcns_exc.VcnsApiException: self._update_status(context, vpnservice_id, ipsec_id, "ERROR") msg = (_("Failed to update firewall rule for ipsec " "vpn with %(edge_id)s.") % 
{'edge_id': edge_id}) raise nsxv_exc.NsxPluginException(err_msg=msg) def _get_gateway_ips(self, router): """Obtain the IPv4 and/or IPv6 GW IP for the router. If there are multiples, (arbitrarily) use the first one. """ v4_ip = v6_ip = None for fixed_ip in router.gw_port['fixed_ips']: addr = fixed_ip['ip_address'] vers = netaddr.IPAddress(addr).version if vers == 4: if v4_ip is None: v4_ip = addr elif v6_ip is None: v6_ip = addr return v4_ip, v6_ip def create_vpnservice(self, context, vpnservice): LOG.debug('Creating VPN service %(vpn)s', {'vpn': vpnservice}) vpnservice_id = vpnservice['id'] try: self.validator.validate_vpnservice(context, vpnservice) except Exception: with excutils.save_and_reraise_exception(): # Rolling back change on the neutron self.service_plugin.delete_vpnservice(context, vpnservice_id) vpnservice = self.service_plugin._get_vpnservice(context, vpnservice_id) v4_ip, v6_ip = self._get_gateway_ips(vpnservice.router) if v4_ip: vpnservice['external_v4_ip'] = v4_ip if v6_ip: vpnservice['external_v6_ip'] = v6_ip self.service_plugin.set_external_tunnel_ips(context, vpnservice_id, v4_ip=v4_ip, v6_ip=v6_ip) def update_vpnservice(self, context, old_vpnservice, vpnservice): pass def delete_vpnservice(self, context, vpnservice): pass def _update_ipsec_config(self, edge_id, sites, enabled=True): ipsec_config = {'featureType': "ipsec_4.0", 'enabled': enabled} ipsec_config['sites'] = {'sites': sites} try: self._vcns.update_ipsec_config(edge_id, ipsec_config) except vcns_exc.VcnsApiException: msg = _("Failed to update ipsec vpn configuration with " "edge_id: %s") % edge_id raise nsxv_exc.NsxPluginException(err_msg=msg) vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsxv/__init__.py0000666000175100017510000000000013244523345024336 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsxv/ipsec_validator.py0000666000175100017510000001300713244523345025762 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_vpnaas.db.vpn import vpn_validator from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import nsxv_constants LOG = logging.getLogger(__name__) class IPsecValidator(vpn_validator.VpnReferenceValidator): """Validator methods for Vmware VPN support""" def __init__(self, service_plugin): super(IPsecValidator, self).__init__() self.vpn_plugin = service_plugin def validate_ikepolicy_version(self, policy_info): """NSX Edge provides IKEv1""" version = policy_info.get('ike_version') if version != 'v1': msg = _("Unsupported ike policy %s! only v1 " "is supported right now.") % version raise nsxv_exc.NsxVpnValidationError(details=msg) def validate_ikepolicy_pfs(self, policy_info): # Check whether pfs is allowed. if not nsxv_constants.PFS_MAP.get(policy_info['pfs']): msg = _("Unsupported pfs: %(pfs)s! currently only " "the following pfs are supported on VSE: %s") % { 'pfs': policy_info['pfs'], 'supported': nsxv_constants.PFS_MAP} raise nsxv_exc.NsxVpnValidationError(details=msg) def validate_encryption_algorithm(self, policy_info): encryption = policy_info['encryption_algorithm'] if encryption not in nsxv_constants.ENCRYPTION_ALGORITHM_MAP: msg = _("Unsupported encryption_algorithm: %(algo)s! 
please " "select one of the following supported algorithms: " "%(supported_algos)s") % { 'algo': encryption, 'supported_algos': nsxv_constants.ENCRYPTION_ALGORITHM_MAP} raise nsxv_exc.NsxVpnValidationError(details=msg) def validate_ipsec_policy(self, context, policy_info): """Ensure IPSec policy encap mode is tunnel for current REST API.""" mode = policy_info['encapsulation_mode'] if mode not in nsxv_constants.ENCAPSULATION_MODE_ALLOWED: msg = _("Unsupported encapsulation mode: %s! currently only" "'tunnel' mode is supported.") % mode raise nsxv_exc.NsxVpnValidationError(details=msg) def validate_policies_matching_algorithms(self, ikepolicy, ipsecpolicy): # In VSE, Phase 1 and Phase 2 share the same encryption_algorithm # and authentication algorithms setting. At present, just record the # discrepancy error in log and take ipsecpolicy to do configuration. keys = ('auth_algorithm', 'encryption_algorithm', 'pfs') for key in keys: if ikepolicy[key] != ipsecpolicy[key]: LOG.warning("IKEPolicy and IPsecPolicy should have consistent " "auth_algorithm, encryption_algorithm and pfs for " "VSE!") break def _is_shared_router(self, router): return router.get('router_type') == nsxv_constants.SHARED def _validate_router(self, context, router_id): # Only support distributed and exclusive router type router = self.core_plugin.get_router(context, router_id) if self._is_shared_router(router): msg = _("Router type is not supported for VPN service, only " "support distributed and exclusive router") raise nsxv_exc.NsxVpnValidationError(details=msg) def validate_vpnservice(self, context, vpnservice): """Called upon create/update of a service""" # Call general validations super(IPsecValidator, self).validate_vpnservice( context, vpnservice) # Call specific NSX validations self._validate_router(context, vpnservice['router_id']) if not vpnservice['subnet_id']: # we currently do not support multiple subnets so a subnet must # be defined msg = _("Subnet must be defined in a service") raise 
nsxv_exc.NsxVpnValidationError(details=msg) def validate_ipsec_site_connection(self, context, ipsec_site_conn): ike_policy_id = ipsec_site_conn.get('ikepolicy_id') if ike_policy_id: ikepolicy = self.vpn_plugin.get_ikepolicy(context, ike_policy_id) self.validate_ikepolicy_version(ikepolicy) self.validate_ikepolicy_pfs(ikepolicy) self.validate_encryption_algorithm(ikepolicy) ipsec_policy_id = ipsec_site_conn.get('ipsecpolicy_id') if ipsec_policy_id: ipsecpolicy = self.vpn_plugin.get_ipsecpolicy(context, ipsec_policy_id) self.validate_ipsec_policy(context, ipsecpolicy) if ike_policy_id and ipsec_policy_id: self.validate_policies_matching_algorithms(ikepolicy, ipsecpolicy) vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/__init__.py0000666000175100017510000000000013244523345023340 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsxv3/0000775000175100017510000000000013244524600022313 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsxv3/ipsec_driver.py0000666000175100017510000007305313244523345025362 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context as n_context from neutron_lib import exceptions as nexception from neutron_lib.plugins import directory from neutron_vpnaas.services.vpn import service_drivers from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.vpnaas.nsxv3 import ipsec_utils from vmware_nsx.services.vpnaas.nsxv3 import ipsec_validator from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants as consts from vmware_nsxlib.v3 import vpn_ipsec LOG = logging.getLogger(__name__) IPSEC = 'ipsec' VPN_PORT_OWNER = constants.DEVICE_OWNER_NEUTRON_PREFIX + 'vpnservice' class RouterWithSNAT(nexception.BadRequest): message = _("Router %(router_id)s has a VPN service and cannot enable " "SNAT") class NSXv3IPsecVpnDriver(service_drivers.VpnDriver): def __init__(self, service_plugin): self.vpn_plugin = service_plugin self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_T) self._nsxlib = self._core_plugin.nsxlib self._nsx_vpn = self._nsxlib.vpn_ipsec validator = ipsec_validator.IPsecV3Validator(service_plugin) super(NSXv3IPsecVpnDriver, self).__init__(service_plugin, validator) registry.subscribe( self._delete_local_endpoint, resources.ROUTER_GATEWAY, events.AFTER_DELETE) @property def l3_plugin(self): return self._core_plugin @property def service_type(self): return IPSEC def _translate_cidr(self, cidr): return self._nsxlib.firewall_section.get_ip_cidr_reference( cidr, consts.IPV6 if netaddr.valid_ipv6(cidr) else consts.IPV4) def 
_translate_addresses_to_target(self, cidrs): return [self._translate_cidr(ip) for ip in cidrs] def _generate_ipsecvpn_firewall_rules(self, plugin_type, context, router_id=None): """Return the firewall rules needed to allow vpn traffic""" fw_rules = [] # get all the active services of this router filters = {'router_id': [router_id], 'status': [constants.ACTIVE]} services = self.vpn_plugin.get_vpnservices( context.elevated(), filters=filters) if not services: return fw_rules for srv in services: subnet = self.l3_plugin.get_subnet( context.elevated(), srv['subnet_id']) local_cidrs = [subnet['cidr']] # get all the active connections of this service filters = {'vpnservice_id': [srv['id']], 'status': [constants.ACTIVE]} connections = self.vpn_plugin.get_ipsec_site_connections( context.elevated(), filters=filters) for conn in connections: peer_cidrs = conn['peer_cidrs'] fw_rules.append({ 'display_name': 'VPN connection ' + conn['id'], 'action': consts.FW_ACTION_ALLOW, 'sources': self._translate_addresses_to_target( peer_cidrs), 'destinations': self._translate_addresses_to_target( local_cidrs)}) return fw_rules def _update_firewall_rules(self, context, vpnservice): LOG.debug("Updating vpn firewall rules for router %s", vpnservice['router_id']) self._core_plugin.update_router_firewall( context, vpnservice['router_id']) def _update_router_advertisement(self, context, vpnservice): LOG.debug("Updating router advertisement rules for router %s", vpnservice['router_id']) router_id = vpnservice['router_id'] # skip no-snat router as it is already advertised, # and router with no gw rtr = self.l3_plugin.get_router(context, router_id) if (not rtr.get('external_gateway_info') or not rtr['external_gateway_info'].get('enable_snat', True)): return rules = [] # get all the active services of this router filters = {'router_id': [router_id], 'status': [constants.ACTIVE]} services = self.vpn_plugin.get_vpnservices( context.elevated(), filters=filters) for srv in services: # use only services 
with active connections filters = {'vpnservice_id': [srv['id']], 'status': [constants.ACTIVE]} connections = self.vpn_plugin.get_ipsec_site_connections( context.elevated(), filters=filters) if not connections: continue subnet = self.l3_plugin.get_subnet( context.elevated(), srv['subnet_id']) rules.append({ 'display_name': 'VPN advertisement service ' + srv['id'], 'action': consts.FW_ACTION_ALLOW, 'networks': [subnet['cidr']]}) logical_router_id = db.get_nsx_router_id(context.session, router_id) self._nsxlib.logical_router.update_advertisement_rules( logical_router_id, rules) def _update_status(self, context, vpn_service_id, ipsec_site_conn_id, status, updated_pending_status=True): ipsec_site_conn = {'status': status, 'updated_pending_status': updated_pending_status} vpn_status = {'id': vpn_service_id, 'updated_pending_status': updated_pending_status, 'status': status, 'ipsec_site_connections': {ipsec_site_conn_id: ipsec_site_conn}} status_list = [vpn_status] self.service_plugin.update_status_by_agent(context, status_list) def _nsx_tags(self, context, connection): return self._nsxlib.build_v3_tags_payload( connection, resource_type='os-vpn-connection-id', project_name=context.tenant_name) def _nsx_tags_for_reused(self): # Service & Local endpoint can be reused cross tenants, # so we do not add the tenant/object id. return self._nsxlib.build_v3_api_version_tag() def _create_ike_profile(self, context, connection): """Create an ike profile for a connection""" # Note(asarfaty) the NSX profile can be reused, so we can consider # creating it only once in the future, and keeping a use-count for it. # There is no driver callback for profiles creation so it has to be # done on connection creation. 
ike_policy_id = connection['ikepolicy_id'] ikepolicy = self.vpn_plugin.get_ikepolicy(context, ike_policy_id) try: profile = self._nsx_vpn.ike_profile.create( ikepolicy['name'], description=ikepolicy['description'], encryption_algorithm=ipsec_utils.ENCRYPTION_ALGORITHM_MAP[ ikepolicy['encryption_algorithm']], digest_algorithm=ipsec_utils.AUTH_ALGORITHM_MAP[ ikepolicy['auth_algorithm']], ike_version=ipsec_utils.IKE_VERSION_MAP[ ikepolicy['ike_version']], dh_group=ipsec_utils.PFS_MAP[ikepolicy['pfs']], sa_life_time=ikepolicy['lifetime']['value'], tags=self._nsx_tags(context, connection)) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create an ike profile: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return profile['id'] def _delete_ike_profile(self, ikeprofile_id): self._nsx_vpn.ike_profile.delete(ikeprofile_id) def _create_ipsec_profile(self, context, connection): """Create an ipsec profile for a connection""" # Note(asarfaty) the NSX profile can be reused, so we can consider # creating it only once in the future, and keeping a use-count for it. # There is no driver callback for profiles creation so it has to be # done on connection creation. 
ipsec_policy_id = connection['ipsecpolicy_id'] ipsecpolicy = self.vpn_plugin.get_ipsecpolicy( context, ipsec_policy_id) try: profile = self._nsx_vpn.tunnel_profile.create( ipsecpolicy['name'], description=ipsecpolicy['description'], encryption_algorithm=ipsec_utils.ENCRYPTION_ALGORITHM_MAP[ ipsecpolicy['encryption_algorithm']], digest_algorithm=ipsec_utils.AUTH_ALGORITHM_MAP[ ipsecpolicy['auth_algorithm']], dh_group=ipsec_utils.PFS_MAP[ipsecpolicy['pfs']], pfs=True, sa_life_time=ipsecpolicy['lifetime']['value'], tags=self._nsx_tags(context, connection)) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a tunnel profile: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return profile['id'] def _delete_ipsec_profile(self, ipsecprofile_id): self._nsx_vpn.tunnel_profile.delete(ipsecprofile_id) def _create_dpd_profile(self, context, connection): dpd_info = connection['dpd'] try: profile = self._nsx_vpn.dpd_profile.create( connection['name'][:240] + '-dpd-profile', description='neutron dpd profile', timeout=dpd_info.get('timeout'), enabled=True if dpd_info.get('action') == 'hold' else False, tags=self._nsx_tags(context, connection)) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a DPD profile: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return profile['id'] def _delete_dpd_profile(self, dpdprofile_id): self._nsx_vpn.dpd_profile.delete(dpdprofile_id) def _update_dpd_profile(self, connection, dpdprofile_id): dpd_info = connection['dpd'] self._nsx_vpn.dpd_profile.update(dpdprofile_id, timeout=dpd_info.get('timeout'), enabled=True if dpd_info.get('action') == 'hold' else False) def _create_peer_endpoint(self, context, connection, ikeprofile_id, ipsecprofile_id, dpdprofile_id): default_auth = vpn_ipsec.AuthenticationModeTypes.AUTH_MODE_PSK try: peer_endpoint = self._nsx_vpn.peer_endpoint.create( connection['name'], connection['peer_address'], connection['peer_id'], description=connection['description'], 
authentication_mode=default_auth, dpd_profile_id=dpdprofile_id, ike_profile_id=ikeprofile_id, ipsec_tunnel_profile_id=ipsecprofile_id, connection_initiation_mode=ipsec_utils.INITIATION_MODE_MAP[ connection['initiator']], psk=connection['psk'], tags=self._nsx_tags(context, connection)) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a peer endpoint: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return peer_endpoint['id'] def _update_peer_endpoint(self, peer_ep_id, connection): self._nsx_vpn.peer_endpoint.update( peer_ep_id, name=connection['name'], peer_address=connection['peer_address'], peer_id=connection['peer_id'], description=connection['description'], connection_initiation_mode=ipsec_utils.INITIATION_MODE_MAP[ connection['initiator']], psk=connection['psk']) def _delete_peer_endpoint(self, peer_ep_id): self._nsx_vpn.peer_endpoint.delete(peer_ep_id) def _get_profiles_from_peer_endpoint(self, peer_ep_id): peer_ep = self._nsx_vpn.peer_endpoint.get(peer_ep_id) return ( peer_ep['ike_profile_id'], peer_ep['ipsec_tunnel_profile_id'], peer_ep['dpd_profile_id']) def _create_local_endpoint(self, context, local_addr, nsx_service_id, router_id): """Creating an NSX local endpoint for a logical router This endpoint can be reused by other connections, and will be deleted when the router is deleted or gateway is removed """ # Add the neutron router-id to the tags to help search later tags = self._nsxlib.build_v3_tags_payload( {'id': router_id}, resource_type='os-neutron-router-id', project_name=context.tenant_name) try: local_endpoint = self._nsx_vpn.local_endpoint.create( 'Local endpoint for OS VPNaaS', local_addr, nsx_service_id, tags=tags) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a local endpoint: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return local_endpoint['id'] def _search_local_endpint(self, router_id): tags = [{'scope': 'os-neutron-router-id', 'tag': router_id}] ep_list = self._nsxlib.search_by_tags( 
tags=tags, resource_type=self._nsx_vpn.local_endpoint.resource_type) if ep_list['results']: return ep_list['results'][0]['id'] def _get_local_endpoint(self, context, connection, vpnservice): """Get the id of the local endpoint for a service The NSX allows only one local endpoint per local address This method will create it if there is not matching endpoint """ # use the router GW as the local ip router_id = vpnservice['router']['id'] # check if we already have this endpoint on the NSX local_ep_id = self._search_local_endpint(router_id) if local_ep_id: return local_ep_id # create a new one local_addr = vpnservice['external_v4_ip'] nsx_service_id = self._get_nsx_vpn_service(context, vpnservice) local_ep_id = self._create_local_endpoint( context, local_addr, nsx_service_id, router_id) return local_ep_id def _find_vpn_service_port(self, context, router_id): """Look for the neutron port created for the vpnservice of a router""" filters = {'device_id': [router_id], 'device_owner': [VPN_PORT_OWNER]} ports = self.l3_plugin.get_ports(context, filters=filters) if ports: return ports[0] def _delete_local_endpoint(self, resource, event, trigger, **kwargs): """Upon router deletion / gw removal delete the matching endpoint""" router_id = kwargs.get('router_id') # delete the local endpoint from the NSX local_ep_id = self._search_local_endpint(router_id) if local_ep_id: self._nsx_vpn.local_endpoint.delete(local_ep_id) # delete the neutron port with this IP ctx = n_context.get_admin_context() port = self._find_vpn_service_port(ctx, router_id) if port: self.l3_plugin.delete_port(ctx, port['id']) def validate_router_gw_info(self, context, router_id, gw_info): """Upon router gw update - verify no-snat""" # ckeck if this router has a vpn service filters = {'router_id': [router_id], 'status': [constants.ACTIVE]} services = self.vpn_plugin.get_vpnservices( context.elevated(), filters=filters) if services: # do not allow enable-snat if (gw_info and gw_info.get('enable_snat', 
cfg.CONF.enable_snat_by_default)): raise RouterWithSNAT(router_id=router_id) def _get_session_rules(self, context, connection, vpnservice): # TODO(asarfaty): support vpn-endpoint-groups too peer_cidrs = connection['peer_cidrs'] local_cidrs = [vpnservice['subnet']['cidr']] rule = self._nsx_vpn.session.get_rule_obj(local_cidrs, peer_cidrs) return [rule] def _create_session(self, context, connection, local_ep_id, peer_ep_id, rules): try: session = self._nsx_vpn.session.create( connection['name'], local_ep_id, peer_ep_id, rules, description=connection['description'], tags=self._nsx_tags(context, connection)) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a session: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return session['id'] def _update_session(self, session_id, connection, rules): self._nsx_vpn.session.update( session_id, name=connection['name'], description=connection['description'], policy_rules=rules) def _delete_session(self, session_id): self._nsx_vpn.session.delete(session_id) def create_ipsec_site_connection(self, context, ipsec_site_conn): LOG.debug('Creating ipsec site connection %(conn_info)s.', {"conn_info": ipsec_site_conn}) # Note(asarfaty) the plugin already calls the validator # which also validated the policies and service ikeprofile_id = None ipsecprofile_id = None dpdprofile_id = None peer_ep_id = None session_id = None vpnservice_id = ipsec_site_conn['vpnservice_id'] vpnservice = self.service_plugin._get_vpnservice( context, vpnservice_id) ipsec_id = ipsec_site_conn["id"] try: # create the ike profile ikeprofile_id = self._create_ike_profile( context, ipsec_site_conn) LOG.debug("Created NSX ike profile %s", ikeprofile_id) # create the ipsec profile ipsecprofile_id = self._create_ipsec_profile( context, ipsec_site_conn) LOG.debug("Created NSX ipsec profile %s", ipsecprofile_id) # create the dpd profile dpdprofile_id = self._create_dpd_profile( context, ipsec_site_conn) LOG.debug("Created NSX dpd profile %s", 
dpdprofile_id) # create the peer endpoint and add to the DB peer_ep_id = self._create_peer_endpoint( context, ipsec_site_conn, ikeprofile_id, ipsecprofile_id, dpdprofile_id) LOG.debug("Created NSX peer endpoint %s", peer_ep_id) # create or reuse a local endpoint using the vpn service local_ep_id = self._get_local_endpoint( context, ipsec_site_conn, vpnservice) # Finally: create the session with policy rules rules = self._get_session_rules( context, ipsec_site_conn, vpnservice) session_id = self._create_session( context, ipsec_site_conn, local_ep_id, peer_ep_id, rules) # update the DB with the session id db.add_nsx_vpn_connection_mapping( context.session, ipsec_site_conn['id'], session_id, dpdprofile_id, ikeprofile_id, ipsecprofile_id, peer_ep_id) self._update_status(context, vpnservice_id, ipsec_id, constants.ACTIVE) except nsx_exc.NsxPluginException: with excutils.save_and_reraise_exception(): self._update_status(context, vpnservice_id, ipsec_id, constants.ERROR) # delete the NSX objects that were already created # Do not delete reused objects: service, local endpoint if session_id: self._delete_session(session_id) if peer_ep_id: self._delete_peer_endpoint(peer_ep_id) if dpdprofile_id: self._delete_dpd_profile(dpdprofile_id) if ipsecprofile_id: self._delete_ipsec_profile(ipsecprofile_id) if ikeprofile_id: self._delete_ike_profile(ikeprofile_id) # update router firewall rules self._update_firewall_rules(context, vpnservice) # update router advertisement rules self._update_router_advertisement(context, vpnservice) def delete_ipsec_site_connection(self, context, ipsec_site_conn): LOG.debug('Deleting ipsec site connection %(site)s.', {"site": ipsec_site_conn}) vpnservice_id = ipsec_site_conn['vpnservice_id'] vpnservice = self.service_plugin._get_vpnservice( context, vpnservice_id) # get all data from the nsx based on the connection id in the DB mapping = db.get_nsx_vpn_connection_mapping( context.session, ipsec_site_conn['id']) if not mapping: LOG.warning("Couldn't 
find nsx ids for VPN connection %s", ipsec_site_conn['id']) # Do not fail the deletion return if mapping['session_id']: self._delete_session(mapping['session_id']) if mapping['peer_ep_id']: self._delete_peer_endpoint(mapping['peer_ep_id']) if mapping['dpd_profile_id']: self._delete_dpd_profile(mapping['dpd_profile_id']) if mapping['ipsec_profile_id']: self._delete_ipsec_profile(mapping['ipsec_profile_id']) if mapping['ike_profile_id']: self._delete_ike_profile(mapping['ike_profile_id']) # Do not delete the local endpoint and service as they are reused db.delete_nsx_vpn_connection_mapping(context.session, ipsec_site_conn['id']) # update router firewall rules self._update_firewall_rules(context, vpnservice) # update router advertisement rules self._update_router_advertisement(context, vpnservice) def update_ipsec_site_connection(self, context, old_ipsec_conn, ipsec_site_conn): LOG.debug('Updating ipsec site connection new %(site)s.', {"site": ipsec_site_conn}) LOG.debug('Updating ipsec site connection old %(site)s.', {"site": old_ipsec_conn}) # Note(asarfaty) the plugin already calls the validator # which also validated the policies and service ipsec_id = old_ipsec_conn['id'] vpnservice_id = old_ipsec_conn['vpnservice_id'] vpnservice = self.service_plugin._get_vpnservice( context, vpnservice_id) mapping = db.get_nsx_vpn_connection_mapping( context.session, ipsec_site_conn['id']) if not mapping: LOG.error("Couldn't find nsx ids for VPN connection %s", ipsec_site_conn['id']) self._update_status(context, vpnservice_id, ipsec_id, "ERROR") raise nsx_exc.NsxIPsecVpnMappingNotFound(conn=ipsec_id) update_all = (old_ipsec_conn['name'] != ipsec_site_conn['name'] or old_ipsec_conn['description'] != ipsec_site_conn['description']) # check if the dpd configuration changed old_dpd = old_ipsec_conn['dpd'] new_dpd = ipsec_site_conn['dpd'] if (old_dpd['action'] != new_dpd['action'] or old_dpd['timeout'] != new_dpd['timeout'] or update_all): self._update_dpd_profile(ipsec_site_conn, 
mapping['dpd_profile_id']) # update peer endpoint with all the parameters that could be modified # Note(asarfaty): local endpoints are reusable and will not be updated self._update_peer_endpoint(mapping['peer_ep_id'], ipsec_site_conn) rules = self._get_session_rules( context, ipsec_site_conn, vpnservice) self._update_session(mapping['session_id'], ipsec_site_conn, rules) if 'peer_cidrs' in ipsec_site_conn: # Update firewall self._update_firewall_rules(context, vpnservice) # No service updates. No need to update router advertisement rules def _create_vpn_service(self, tier0_uuid): try: service = self._nsx_vpn.service.create( 'Neutron VPN service for router ' + tier0_uuid, tier0_uuid, enabled=True, ike_log_level=ipsec_utils.DEFAULT_LOG_LEVEL, tags=self._nsx_tags_for_reused()) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create vpn service: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return service['id'] def _get_tier0_uuid(self, context, router_id): router_db = self._core_plugin._get_router(context, router_id) return self._core_plugin._get_tier0_uuid_by_router(context, router_db) def _get_router_ext_gw(self, context, router_id): router_db = self._core_plugin.get_router(context, router_id) gw = router_db['external_gateway_info'] return gw['external_fixed_ips'][0]["ip_address"] def _find_vpn_service(self, tier0_uuid): # find the service for the tier0 router in the NSX. 
# Note(asarfaty) we expect only a small number of services services = self._nsx_vpn.service.list()['results'] for srv in services: if srv['logical_router_id'] == tier0_uuid: # if it exists but disabled: issue an error if not srv.get('enabled', True): msg = _("NSX vpn service %s must be enabled") % srv['id'] raise nsx_exc.NsxPluginException(err_msg=msg) return srv['id'] def _create_vpn_service_if_needed(self, context, vpnservice): # The service is created on the TIER0 router attached to the router GW # The NSX can keep only one service per tier0 router so we reuse it router_id = vpnservice['router_id'] tier0_uuid = self._get_tier0_uuid(context, router_id) if self._find_vpn_service(tier0_uuid): return # create a new one self._create_vpn_service(tier0_uuid) def _get_nsx_vpn_service(self, context, vpnservice): router_id = vpnservice['router_id'] tier0_uuid = self._get_tier0_uuid(context, router_id) return self._find_vpn_service(tier0_uuid) def _get_service_local_address(self, context, vpnservice): """Find/Allocate a port on the external network to save the ip to be used as the local ip of this service """ router_id = vpnservice.router['id'] # check if this router already have an IP port = self._find_vpn_service_port(context, router_id) if not port: # create a new port, on the external network of the router ext_net = vpnservice.router.gw_port['network_id'] port_data = { 'port': { 'network_id': ext_net, 'name': None, 'admin_state_up': True, 'device_id': vpnservice.router['id'], 'device_owner': VPN_PORT_OWNER, 'fixed_ips': constants.ATTR_NOT_SPECIFIED, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'port_security_enabled': False, 'tenant_id': vpnservice['tenant_id']}} port = self.l3_plugin.base_create_port(context, port_data) # return the port ip as the local address return port['fixed_ips'][0]['ip_address'] def create_vpnservice(self, context, vpnservice): #TODO(asarfaty) support vpn-endpoint-group-create for local & peer # cidrs too LOG.debug('Creating VPN service 
%(vpn)s', {'vpn': vpnservice}) vpnservice_id = vpnservice['id'] vpnservice = self.service_plugin._get_vpnservice(context, vpnservice_id) try: self.validator.validate_vpnservice(context, vpnservice) local_address = self._get_service_local_address( context, vpnservice) except Exception: with excutils.save_and_reraise_exception(): # Rolling back change on the neutron self.service_plugin.delete_vpnservice(context, vpnservice_id) vpnservice['external_v4_ip'] = local_address self.service_plugin.set_external_tunnel_ips(context, vpnservice_id, v4_ip=local_address) self._create_vpn_service_if_needed(context, vpnservice) def update_vpnservice(self, context, old_vpnservice, vpnservice): # No meaningful field can change here pass def delete_vpnservice(self, context, vpnservice): # Do not delete the NSX service or DB entry as those will be reused. pass vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsxv3/ipsec_utils.py0000666000175100017510000000341213244523345025217 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from vmware_nsxlib.v3 import vpn_ipsec ENCRYPTION_ALGORITHM_MAP = { 'aes-128': vpn_ipsec.EncryptionAlgorithmTypes.ENCRYPTION_ALGORITHM_128, 'aes-256': vpn_ipsec.EncryptionAlgorithmTypes.ENCRYPTION_ALGORITHM_256, } AUTH_ALGORITHM_MAP = { 'sha1': vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA1, 'sha256': vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA256, } PFS_MAP = { 'group14': vpn_ipsec.DHGroupTypes.DH_GROUP_14 } IKE_VERSION_MAP = { 'v1': vpn_ipsec.IkeVersionTypes.IKE_VERSION_V1, 'v2': vpn_ipsec.IkeVersionTypes.IKE_VERSION_V2, } ENCAPSULATION_MODE_MAP = { 'tunnel': vpn_ipsec.EncapsulationModeTypes.ENCAPSULATION_MODE_TUNNEL } TRANSFORM_PROTOCOL_MAP = { 'esp': vpn_ipsec.TransformProtocolTypes.TRANSFORM_PROTOCOL_ESP } DPD_ACTION_MAP = { 'hold': vpn_ipsec.DpdProfileActionTypes.DPD_PROFILE_ACTION_HOLD, 'disabled': None } INITIATION_MODE_MAP = { 'bi-directional': (vpn_ipsec.ConnectionInitiationModeTypes. INITIATION_MODE_INITIATOR), 'response-only': (vpn_ipsec.ConnectionInitiationModeTypes. INITIATION_MODE_RESPOND_ONLY) } DEFAULT_LOG_LEVEL = vpn_ipsec.IkeLogLevelTypes.LOG_LEVEL_ERROR vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsxv3/__init__.py0000666000175100017510000000000013244523345024421 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsxv3/ipsec_validator.py0000666000175100017510000004413213244523345026050 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from oslo_log import log as logging from neutron_lib import constants from neutron_vpnaas.db.vpn import vpn_validator from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.vpnaas.nsxv3 import ipsec_utils from vmware_nsxlib.v3 import nsx_constants as consts from vmware_nsxlib.v3 import vpn_ipsec LOG = logging.getLogger(__name__) class IPsecV3Validator(vpn_validator.VpnReferenceValidator): """Validator methods for Vmware NSX-V3 VPN support""" def __init__(self, service_plugin): super(IPsecV3Validator, self).__init__() self.vpn_plugin = service_plugin self._core_plugin = self.core_plugin if self._core_plugin.is_tvd_plugin(): self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_T) self.nsxlib = self._core_plugin.nsxlib self.check_backend_version() def check_backend_version(self): if not self.nsxlib.feature_supported(consts.FEATURE_IPSEC_VPN): # ipsec vpn is not supported LOG.warning("VPNaaS is not supported by the NSX backend (version " "%s)", self.nsxlib.get_version()) self.backend_support = False else: self.backend_support = True def _validate_backend_version(self): if not self.backend_support: msg = (_("VPNaaS is not supported by the NSX backend " "(version %s)") % self.nsxlib.get_version()) raise nsx_exc.NsxVpnValidationError(details=msg) def _validate_policy_lifetime(self, policy_info, policy_type): """NSX supports only units=seconds""" lifetime = policy_info.get('lifetime') if not lifetime: return if lifetime.get('units') != 'seconds': msg = _("Unsupported policy lifetime %(val)s in %(pol)s policy. 
" "Only seconds lifetime is supported.") % { 'val': lifetime, 'pol': policy_type} raise nsx_exc.NsxVpnValidationError(details=msg) value = lifetime.get('value') if policy_type == 'IKE': limits = vpn_ipsec.IkeSALifetimeLimits else: limits = vpn_ipsec.IPsecSALifetimeLimits if (value and (value < limits.SA_LIFETIME_MIN or value > limits.SA_LIFETIME_MAX)): msg = _("Unsupported policy lifetime %(value)s in %(pol)s policy. " "Value range is [%(min)s-%(max)s].") % { 'value': value, 'pol': policy_type, 'min': limits.SA_LIFETIME_MIN, 'max': limits.SA_LIFETIME_MAX} raise nsx_exc.NsxVpnValidationError(details=msg) def _validate_policy_auth_algorithm(self, policy_info, policy_type): """NSX supports only SHA1 and SHA256""" auth = policy_info.get('auth_algorithm') if auth and auth not in ipsec_utils.AUTH_ALGORITHM_MAP: msg = _("Unsupported auth_algorithm: %(algo)s in %(pol)s policy. " "Please select one of the following supported algorithms: " "%(supported_algos)s") % { 'pol': policy_type, 'algo': auth, 'supported_algos': ipsec_utils.AUTH_ALGORITHM_MAP.keys()} raise nsx_exc.NsxVpnValidationError(details=msg) def _validate_policy_encryption_algorithm(self, policy_info, policy_type): encryption = policy_info.get('encryption_algorithm') if (encryption and encryption not in ipsec_utils.ENCRYPTION_ALGORITHM_MAP): msg = _("Unsupported encryption_algorithm: %(algo)s in %(pol)s " "policy. Please select one of the following supported " "algorithms: %(supported_algos)s") % { 'algo': encryption, 'pol': policy_type, 'supported_algos': ipsec_utils.ENCRYPTION_ALGORITHM_MAP.keys()} raise nsx_exc.NsxVpnValidationError(details=msg) def _validate_policy_pfs(self, policy_info, policy_type): pfs = policy_info.get('pfs') if pfs and pfs not in ipsec_utils.PFS_MAP: msg = _("Unsupported pfs: %(pfs)s in %(pol)s policy. 
Please " "select one of the following pfs: " "%(supported_pfs)s") % { 'pfs': pfs, 'pol': policy_type, 'supported_pfs': ipsec_utils.PFS_MAP.keys()} raise nsx_exc.NsxVpnValidationError(details=msg) def _validate_dpd(self, connection): dpd_info = connection.get('dpd') if not dpd_info: return action = dpd_info.get('action') if action not in ipsec_utils.DPD_ACTION_MAP.keys(): msg = _("Unsupported DPD action: %(action)s! Currently only " "%(supported)s is supported.") % { 'action': action, 'supported': ipsec_utils.DPD_ACTION_MAP.keys()} raise nsx_exc.NsxVpnValidationError(details=msg) timeout = dpd_info.get('timeout') if (timeout < vpn_ipsec.DpdProfileTimeoutLimits.DPD_TIMEOUT_MIN or timeout > vpn_ipsec.DpdProfileTimeoutLimits.DPD_TIMEOUT_MAX): msg = _("Unsupported DPD timeout: %(timeout)s. Value range is " "[%(min)s-%(max)s].") % { 'timeout': timeout, 'min': vpn_ipsec.DpdProfileTimeoutLimits.DPD_TIMEOUT_MIN, 'max': vpn_ipsec.DpdProfileTimeoutLimits.DPD_TIMEOUT_MAX} raise nsx_exc.NsxVpnValidationError(details=msg) def _validate_psk(self, connection): if 'psk' in connection and not connection['psk']: msg = _("'psk' cannot be empty or null when authentication " "mode is psk") raise nsx_exc.NsxVpnValidationError(details=msg) def _check_policy_rules_overlap(self, context, ipsec_site_conn): """validate no overlapping policy rules The nsx does not support overlapping policy rules cross all tenants, and tier0 routers """ connections = self.vpn_plugin.get_ipsec_site_connections( context.elevated()) if not connections: return vpnservice_id = ipsec_site_conn.get('vpnservice_id') vpnservice = self.vpn_plugin._get_vpnservice(context, vpnservice_id) local_cidrs = [vpnservice['subnet']['cidr']] peer_cidrs = ipsec_site_conn['peer_cidrs'] for conn in connections: # skip this connection and connections in non active state if (conn['id'] == ipsec_site_conn.get('id') or conn['status'] != constants.ACTIVE): continue # TODO(asarfaty): support peer groups too # check if it overlaps with the 
peer cidrs conn_peer_cidrs = conn['peer_cidrs'] if netaddr.IPSet(conn_peer_cidrs) & netaddr.IPSet(peer_cidrs): # check if the local cidr also overlaps con_service_id = conn.get('vpnservice_id') con_service = self.vpn_plugin._get_vpnservice( context.elevated(), con_service_id) conn_local_cidr = [con_service['subnet']['cidr']] if netaddr.IPSet(conn_local_cidr) & netaddr.IPSet(local_cidrs): msg = (_("Cannot create a connection with overlapping " "local and peer cidrs (%(local)s and %(peer)s) " "as connection %(id)s") % {'local': local_cidrs, 'peer': peer_cidrs, 'id': conn['id']}) raise nsx_exc.NsxVpnValidationError(details=msg) def _check_unique_addresses(self, context, ipsec_site_conn): """Validate no repeating local & peer addresses (of all tenants) The nsx does not support it cross all tenants, and tier0 routers """ vpnservice_id = ipsec_site_conn.get('vpnservice_id') local_addr = self._get_service_local_address(context, vpnservice_id) peer_address = ipsec_site_conn.get('peer_address') filters = {'peer_address': [peer_address]} connections = self.vpn_plugin.get_ipsec_site_connections( context.elevated(), filters=filters) for conn in connections: # skip this connection and connections in non active state if (conn['id'] == ipsec_site_conn.get('id') or conn['status'] != constants.ACTIVE): continue # this connection has the same peer addr as ours. # check the service local address srv_id = conn.get('vpnservice_id') srv_local = self._get_service_local_address( context.elevated(), srv_id) if srv_local == local_addr: msg = (_("Cannot create another connection with the same " "local address %(local)s and peer address %(peer)s " "as connection %(id)s") % {'local': local_addr, 'peer': peer_address, 'id': conn['id']}) raise nsx_exc.NsxVpnValidationError(details=msg) def _check_advertisment_overlap(self, context, ipsec_site_conn): """Validate there is no overlapping advertisement of networks The plugin advertise all no-snat routers networks + vpn local networks. 
The NSX does not allow different Tier1 router to advertise the same subnets """ admin_con = context.elevated() srv_id = ipsec_site_conn.get('vpnservice_id') srv = self.vpn_plugin._get_vpnservice(admin_con, srv_id) this_router = srv['router_id'] this_cidr = srv['subnet']['cidr'] # get all subnets of no-snat routers all_routers = self._core_plugin.get_routers(admin_con) nosnat_routers = [rtr for rtr in all_routers if (rtr['id'] != this_router and rtr.get('external_gateway_info') and not rtr['external_gateway_info'].get( 'enable_snat', True))] for rtr in nosnat_routers: if rtr['id'] == this_router: continue # go over the subnets of this router subnets = self._core_plugin._find_router_subnets_cidrs( admin_con, rtr['id']) if subnets and netaddr.IPSet(subnets) & netaddr.IPSet([this_cidr]): msg = (_("Cannot create connection with overlapping local " "cidrs %(local)s which was already advertised by " "no-snat router %(rtr)s") % {'local': subnets, 'rtr': rtr['id']}) raise nsx_exc.NsxVpnValidationError(details=msg) # add all vpn local subnets connections = self.vpn_plugin.get_ipsec_site_connections(admin_con) for conn in connections: # skip this connection and connections in non active state if (conn['id'] == ipsec_site_conn.get('id') or conn['status'] != constants.ACTIVE): continue # check the service local address conn_srv_id = conn.get('vpnservice_id') conn_srv = self.vpn_plugin._get_vpnservice(admin_con, conn_srv_id) if conn_srv['router_id'] == this_router: continue conn_cidr = conn_srv['subnet']['cidr'] if netaddr.IPSet([conn_cidr]) & netaddr.IPSet([this_cidr]): msg = (_("Cannot create connection with overlapping local " "cidr %(local)s which was already advertised by " "router %(rtr)s and connection %(conn)s") % { 'local': conn_cidr, 'rtr': conn_srv['router_id'], 'conn': conn['id']}) raise nsx_exc.NsxVpnValidationError(details=msg) # TODO(asarfaty): also add this validation when adding an interface # or no-snat to a router through the nsx-v3 plugin def 
validate_ipsec_site_connection(self, context, ipsec_site_conn): """Called upon create/update of a connection""" self._validate_backend_version() self._validate_dpd(ipsec_site_conn) self._validate_psk(ipsec_site_conn) ike_policy_id = ipsec_site_conn.get('ikepolicy_id') if ike_policy_id: ikepolicy = self.vpn_plugin.get_ikepolicy(context, ike_policy_id) self.validate_ike_policy(context, ikepolicy) ipsec_policy_id = ipsec_site_conn.get('ipsecpolicy_id') if ipsec_policy_id: ipsecpolicy = self.vpn_plugin.get_ipsecpolicy(context, ipsec_policy_id) self.validate_ipsec_policy(context, ipsecpolicy) if ipsec_site_conn.get('vpnservice_id'): self._check_advertisment_overlap(context, ipsec_site_conn) self._check_unique_addresses(context, ipsec_site_conn) self._check_policy_rules_overlap(context, ipsec_site_conn) #TODO(asarfaty): IPv6 is not yet supported. add validation def _get_service_local_address(self, context, vpnservice_id): """The local address of the service is assigned upon creation From the attached external network pool """ vpnservice = self.vpn_plugin._get_vpnservice(context, vpnservice_id) return vpnservice['external_v4_ip'] def _validate_router(self, context, router_id): # Verify that the router gw network is connected to an active-standby # Tier0 router router_db = self._core_plugin._get_router(context, router_id) tier0_uuid = self._core_plugin._get_tier0_uuid_by_router(context, router_db) # TODO(asarfaty): cache this result tier0_router = self.nsxlib.logical_router.get(tier0_uuid) if (not tier0_router or tier0_router.get('high_availability_mode') != 'ACTIVE_STANDBY'): msg = _("The router GW should be connected to a TIER-0 router " "with ACTIVE_STANDBY HA mode") raise nsx_exc.NsxVpnValidationError(details=msg) # Verify that this is a no-snat router if router_db.enable_snat: msg = _("VPN is supported only for routers with disabled SNAT") raise nsx_exc.NsxVpnValidationError(details=msg) def validate_vpnservice(self, context, vpnservice): """Called upon create/update 
of a service""" self._validate_backend_version() # Call general validations super(IPsecV3Validator, self).validate_vpnservice( context, vpnservice) # Call specific NSX validations self._validate_router(context, vpnservice['router_id']) if not vpnservice['subnet_id']: # we currently do not support multiple subnets so a subnet must # be defined msg = _("Subnet must be defined in a service") raise nsx_exc.NsxVpnValidationError(details=msg) #TODO(asarfaty): IPv6 is not yet supported. add validation def validate_ipsec_policy(self, context, ipsec_policy): # Call general validations super(IPsecV3Validator, self).validate_ipsec_policy( context, ipsec_policy) # Call specific NSX validations self._validate_policy_lifetime(ipsec_policy, "IPSec") self._validate_policy_auth_algorithm(ipsec_policy, "IPSec") self._validate_policy_encryption_algorithm(ipsec_policy, "IPSec") self._validate_policy_pfs(ipsec_policy, "IPSec") # Ensure IPSec policy encap mode is tunnel mode = ipsec_policy.get('encapsulation_mode') if mode and mode not in ipsec_utils.ENCAPSULATION_MODE_MAP.keys(): msg = _("Unsupported encapsulation mode: %s. Only 'tunnel' mode " "is supported.") % mode raise nsx_exc.NsxVpnValidationError(details=msg) # Ensure IPSec policy transform protocol is esp prot = ipsec_policy.get('transform_protocol') if prot and prot not in ipsec_utils.TRANSFORM_PROTOCOL_MAP.keys(): msg = _("Unsupported transform protocol: %s. 
Only 'esp' protocol " "is supported.") % prot raise nsx_exc.NsxVpnValidationError(details=msg) def validate_ike_policy(self, context, ike_policy): # Call general validations super(IPsecV3Validator, self).validate_ike_policy( context, ike_policy) # Call specific NSX validations self._validate_policy_lifetime(ike_policy, "IKE") self._validate_policy_auth_algorithm(ike_policy, "IKE") self._validate_policy_encryption_algorithm(ike_policy, "IKE") self._validate_policy_pfs(ike_policy, "IKE") # 'aggressive' phase1-negotiation-mode is not supported if ike_policy.get('phase1-negotiation-mode', 'main') != 'main': msg = _("Unsupported phase1-negotiation-mode: %s! Only 'main' is " "supported.") % ike_policy['phase1-negotiation-mode'] raise nsx_exc.NsxVpnValidationError(details=msg) vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsx_tvd/0000775000175100017510000000000013244524600022717 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsx_tvd/ipsec_driver.py0000666000175100017510000001023513244523345025757 0ustar zuulzuul00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from neutron_lib.plugins import directory from neutron_vpnaas.services.vpn import service_drivers from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.services.vpnaas.nsx_tvd import ipsec_validator from vmware_nsx.services.vpnaas.nsxv import ipsec_driver as v_driver from vmware_nsx.services.vpnaas.nsxv3 import ipsec_driver as t_driver LOG = logging.getLogger(__name__) IPSEC = 'ipsec' class NSXIPsecVpnDriver(service_drivers.VpnDriver): """Wrapper driver to select the relevant driver for each VPNaaS request""" def __init__(self, service_plugin): self.vpn_plugin = service_plugin self._core_plugin = directory.get_plugin() validator = ipsec_validator.IPsecValidator(service_plugin) super(NSXIPsecVpnDriver, self).__init__(service_plugin, validator) # supported drivers: self.drivers = {} try: self.drivers[projectpluginmap.NsxPlugins.NSX_T] = ( t_driver.NSXv3IPsecVpnDriver(service_plugin)) except Exception as e: LOG.error("NSXIPsecVpnDriver failed to initialize the NSX-T " "driver: %s", e) self.drivers[projectpluginmap.NsxPlugins.NSX_T] = None try: self.drivers[projectpluginmap.NsxPlugins.NSX_V] = ( v_driver.NSXvIPsecVpnDriver(service_plugin)) except Exception as e: LOG.error("NSXIPsecVpnDriver failed to initialize the NSX-V " "driver: %s", e) self.drivers[projectpluginmap.NsxPlugins.NSX_V] = None @property def service_type(self): return IPSEC def _get_driver_for_project(self, project): plugin_type = tvd_utils.get_tvd_plugin_type_for_project(project) if not self.drivers.get(plugin_type): msg = (_("Project %(project)s with plugin %(plugin)s has no " "support for VPNaaS"), {'project': project, 'plugin': plugin_type}) raise nsx_exc.NsxPluginException(err_msg=msg) return self.drivers[plugin_type] def create_ipsec_site_connection(self, context, ipsec_site_conn): d = self._get_driver_for_project(ipsec_site_conn['tenant_id']) 
return d.create_ipsec_site_connection(context, ipsec_site_conn) def delete_ipsec_site_connection(self, context, ipsec_site_conn): d = self._get_driver_for_project(ipsec_site_conn['tenant_id']) return d.delete_ipsec_site_connection(context, ipsec_site_conn) def update_ipsec_site_connection(self, context, old_ipsec_conn, ipsec_site_conn): d = self._get_driver_for_project(old_ipsec_conn['tenant_id']) return d.update_ipsec_site_connection(context, old_ipsec_conn, ipsec_site_conn) def create_vpnservice(self, context, vpnservice): d = self._get_driver_for_project(vpnservice['tenant_id']) return d.create_vpnservice(context, vpnservice) def update_vpnservice(self, context, old_vpnservice, vpnservice): pass def delete_vpnservice(self, context, vpnservice): pass def _generate_ipsecvpn_firewall_rules(self, plugin_type, context, **kargs): d = self.drivers.get(plugin_type) if d: return d._generate_ipsecvpn_firewall_rules( plugin_type, context, **kargs) return [] vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsx_tvd/__init__.py0000666000175100017510000000000013244523345025025 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsx_tvd/ipsec_validator.py0000666000175100017510000001132613244523345026453 0ustar zuulzuul00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from neutron_vpnaas.db.vpn import vpn_validator from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.services.vpnaas.nsxv import ipsec_validator as v_validator from vmware_nsx.services.vpnaas.nsxv3 import ipsec_validator as t_validator LOG = logging.getLogger(__name__) class IPsecValidator(vpn_validator.VpnReferenceValidator): """Wrapper validator for selecting the V/T validator to use""" def __init__(self, service_plugin): super(IPsecValidator, self).__init__() self.vpn_plugin = service_plugin # supported validatorss: self.validators = {} try: self.validators[projectpluginmap.NsxPlugins.NSX_T] = ( t_validator.IPsecV3Validator(service_plugin)) except Exception as e: LOG.error("IPsecValidator failed to initialize the NSX-T " "validator: %s", e) self.validators[projectpluginmap.NsxPlugins.NSX_T] = None try: self.validators[projectpluginmap.NsxPlugins.NSX_V] = ( v_validator.IPsecValidator(service_plugin)) except Exception as e: LOG.error("IPsecValidator failed to initialize the NSX-V " "validator: %s", e) self.validators[projectpluginmap.NsxPlugins.NSX_V] = None def _get_validator_for_project(self, project): plugin_type = tvd_utils.get_tvd_plugin_type_for_project(project) if not self.validators.get(plugin_type): msg = (_("Project %(project)s with plugin %(plugin)s has no " "support for VPNaaS"), {'project': project, 'plugin': plugin_type}) raise nsx_exc.NsxPluginException(err_msg=msg) return self.validators[plugin_type] def validate_ipsec_site_connection(self, context, ipsec_site_conn): if not ipsec_site_conn.get('tenant_id'): # nothing we can do here. 
return v = self._get_validator_for_project(ipsec_site_conn['tenant_id']) # first make sure the plugin is the same as the one of the vpnservice srv_id = ipsec_site_conn.get('vpnservice_id') srv = self.vpn_plugin._get_vpnservice(context, srv_id) srv_validator = self._get_validator_for_project(srv['tenant_id']) if v != srv_validator: err_msg = _('VPN service should belong to the same plugin ' 'as the connection') raise nsx_exc.NsxVpnValidationError(details=err_msg) return v.validate_ipsec_site_connection(context, ipsec_site_conn) def validate_vpnservice(self, context, vpnservice): if not vpnservice.get('tenant_id'): # This will happen during update. # nothing significant like router or subnet can be changes # so we can skip it. return v = self._get_validator_for_project(vpnservice['tenant_id']) # first make sure the router&subnet plugin matches the vpnservice router_id = vpnservice['router_id'] p = self.core_plugin._get_plugin_from_router_id(context, router_id) if self.validators.get(p.plugin_type()) != v: err_msg = _('Router & subnet should belong to the same plugin ' 'as the VPN service') raise nsx_exc.NsxVpnValidationError(details=err_msg) return v.validate_vpnservice(context, vpnservice) def validate_ipsec_policy(self, context, ipsec_policy): if not ipsec_policy.get('tenant_id'): # nothing we can do here return v = self._get_validator_for_project(ipsec_policy['tenant_id']) return v.validate_ipsec_policy(context, ipsec_policy) def validate_ike_policy(self, context, ike_policy): if not ike_policy.get('tenant_id'): # nothing we can do here return v = self._get_validator_for_project(ike_policy['tenant_id']) return v.validate_ike_policy(context, ike_policy) vmware-nsx-12.0.1/vmware_nsx/services/vpnaas/nsx_tvd/plugin.py0000666000175100017510000000216213244523345024577 0ustar zuulzuul00000000000000# Copyright 2018 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_vpnaas.services.vpn import plugin from vmware_nsx.plugins.nsx import utils as tvd_utils @tvd_utils.filter_plugins class VPNPlugin(plugin.VPNDriverPlugin): """NSX-TV plugin for QoS. This plugin adds separation between T/V instances """ methods_to_separate = ['get_ipsec_site_connections', 'get_ikepolicies', 'get_ipsecpolicies', 'get_vpnservices', 'get_endpoint_groups'] vmware-nsx-12.0.1/vmware_nsx/services/fwaas/0000775000175100017510000000000013244524600021043 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_v3/0000775000175100017510000000000013244524600022263 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_v3/fwaas_callbacks_v2.py0000666000175100017510000001062413244523345026356 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from neutron_lib import constants as nl_constants from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.fwaas.common import fwaas_callbacks_v2 as \ com_callbacks from vmware_nsx.services.fwaas.nsx_tv import edge_fwaas_driver_v2 as tv_driver LOG = logging.getLogger(__name__) class Nsxv3FwaasCallbacksV2(com_callbacks.NsxFwaasCallbacksV2): """NSX-V3 RPC callbacks for Firewall As A Service - V2.""" def __init__(self): super(Nsxv3FwaasCallbacksV2, self).__init__() # update the fwaas driver in case of TV plugin self.internal_driver = None if self.fwaas_enabled: if self.fwaas_driver.driver_name == tv_driver.FWAAS_DRIVER_NAME: self.internal_driver = self.fwaas_driver.get_T_driver() else: self.internal_driver = self.fwaas_driver @property def plugin_type(self): return projectpluginmap.NsxPlugins.NSX_T def should_apply_firewall_to_router(self, context, router_id): """Return True if the FWaaS rules should be added to this router.""" if not super(Nsxv3FwaasCallbacksV2, self).should_apply_firewall_to_router(context, router_id): return False # get all the relevant router info ctx_elevated = context.elevated() router_data = self.core_plugin.get_router(ctx_elevated, router_id) if not router_data: LOG.error("Couldn't read router %s data", router_id) return False # Check if the FWaaS driver supports this router if not self.internal_driver.should_apply_firewall_to_router( router_data): return False return True def get_port_rules(self, nsx_ls_id, fwg, plugin_rules): return self.internal_driver.get_port_translated_rules( nsx_ls_id, fwg, plugin_rules) def update_router_firewall(self, context, nsxlib, router_id, router_interfaces, nsx_router_id, section_id): """Rewrite all the FWaaS v2 rules in the router edge firewall This method should be called on FWaaS updates, and on router interfaces changes. 
""" fw_rules = [] # Add firewall rules per port attached to a firewall group for port in router_interfaces: nsx_ls_id, _nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, port['id']) # Check if this port has a firewall fwg = self.get_port_fwg(context, port['id']) if fwg: # Add plugin additional allow rules plugin_rules = self.core_plugin.get_extra_fw_rules( context, router_id, port['id']) # add the FWaaS rules for this port # ingress/egress firewall rules + default ingress/egress drop # rule for this port fw_rules.extend(self.get_port_rules(nsx_ls_id, fwg, plugin_rules)) # add a default allow-all rule to all other traffic & ports fw_rules.append(self.internal_driver.get_default_backend_rule( section_id, allow_all=True)) # update the backend router firewall nsxlib.firewall_section.update(section_id, rules=fw_rules) def delete_port(self, context, port_id): # Mark the FW group as inactive if this is the last port fwg = self.get_port_fwg(context, port_id) if (fwg and fwg.get('status') == nl_constants.ACTIVE and len(fwg.get('ports', [])) <= 1): self.fwplugin_rpc.set_firewall_group_status( context, fwg['id'], nl_constants.INACTIVE) vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_v3/fwaas_callbacks_v1.py0000666000175100017510000000755313244523345026364 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.fwaas.common import fwaas_callbacks_v1 as com_clbcks from vmware_nsx.services.fwaas.nsx_tv import edge_fwaas_driver_v1 as tv_driver LOG = logging.getLogger(__name__) class Nsxv3FwaasCallbacksV1(com_clbcks.NsxFwaasCallbacks): """NSX-V3 RPC callbacks for Firewall As A Service - V1.""" def __init__(self): super(Nsxv3FwaasCallbacksV1, self).__init__() # update the fwaas driver in case of TV plugin if self.fwaas_enabled: if self.fwaas_driver.driver_name == tv_driver.FWAAS_DRIVER_NAME: self.internal_driver = self.fwaas_driver.get_T_driver() else: self.internal_driver = self.fwaas_driver @property def plugin_type(self): return projectpluginmap.NsxPlugins.NSX_T def should_apply_firewall_to_router(self, context, router_id): """Return True if the FWaaS rules should be added to this router.""" if not super(Nsxv3FwaasCallbacksV1, self).should_apply_firewall_to_router(context, router_id): return False # get all the relevant router info ctx_elevated = context.elevated() router_data = self.core_plugin.get_router(ctx_elevated, router_id) if not router_data: LOG.error("Couldn't read router %s data", router_id) return False # Check if the FWaaS driver supports this router if not self.internal_driver.should_apply_firewall_to_router( router_data): return False return True def update_router_firewall(self, context, nsxlib, router_id, router_interfaces, nsx_router_id, section_id): """Rewrite all the FWaaS v1 rules in the router edge firewall This method should be called on FWaaS updates, and on router interfaces changes. 
""" fw_rules = [] fw_id = None if self.should_apply_firewall_to_router(context, router_id): # Find the firewall attached to this router # (must have one since should_apply returned true) firewall = self.get_router_firewall(context, router_id) fw_id = firewall['id'] # Add the FW rules fw_rules.extend(self.internal_driver.get_router_translated_rules( router_id, firewall)) # Add plugin additional allow rules fw_rules.extend(self.core_plugin.get_extra_fw_rules( context, router_id)) # Add the default drop all rule fw_rules.append(self.internal_driver.get_default_backend_rule( section_id, allow_all=False)) else: # default allow all rule fw_rules.append(self.internal_driver.get_default_backend_rule( section_id, allow_all=True)) # update the backend nsxlib.firewall_section.update(section_id, rules=fw_rules) # Also update the router tags self.internal_driver.update_nsx_router_tags(nsx_router_id, fw_id=fw_id) def delete_port(self, context, port_id): # nothing to do in FWaaS v1 pass vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_v3/edge_fwaas_driver_base.py0000666000175100017510000002171013244523345027277 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from neutron_fwaas.services.firewall.drivers import fwaas_base from neutron_lib.api.definitions import constants as fwaas_consts from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib.plugins import directory from oslo_log import log as logging from vmware_nsx.extensions import projectpluginmap from vmware_nsxlib.v3 import nsx_constants as consts LOG = logging.getLogger(__name__) RULE_NAME_PREFIX = 'Fwaas-' DEFAULT_RULE_NAME = 'Default LR Layer3 Rule' class CommonEdgeFwaasV3Driver(fwaas_base.FwaasDriverBase): """Base class for NSX-V3 driver for Firewall As A Service - V1 & V2.""" def __init__(self, driver_exception, driver_name): super(CommonEdgeFwaasV3Driver, self).__init__() self.driver_name = driver_name self.backend_support = True self.driver_exception = driver_exception registry.subscribe( self.check_backend_version, resources.PROCESS, events.BEFORE_SPAWN) self._core_plugin = None @property def core_plugin(self): """Get the NSX-V3 core plugin""" if not self._core_plugin: self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_T) if not self._core_plugin: # The nsx-t plugin was not initialized return # make sure plugin init was completed if not self._core_plugin.init_is_complete: self._core_plugin.init_complete(None, None, {}) return self._core_plugin @property def nsxlib(self): return self.core_plugin.nsxlib @property def nsx_firewall(self): return self.nsxlib.firewall_section @property def nsx_router(self): return self.nsxlib.logical_router def check_backend_version(self, resource, event, trigger, payload=None): if (self.core_plugin and not self.nsxlib.feature_supported(consts.FEATURE_ROUTER_FIREWALL)): # router firewall is not supported LOG.warning("FWaaS is not supported by the NSX backend (version " "%s): Router firewall is not supported", 
self.nsxlib.get_version()) self.backend_support = False def should_apply_firewall_to_router(self, router_data): """Return True if the firewall rules should be added the router Right now the driver supports for all routers. """ return True def _translate_action(self, fwaas_action, fwaas_rule_id): """Translate FWaaS action to NSX action""" if fwaas_action == fwaas_consts.FWAAS_ALLOW: return consts.FW_ACTION_ALLOW if fwaas_action == fwaas_consts.FWAAS_DENY: return consts.FW_ACTION_DROP if fwaas_action == fwaas_consts.FWAAS_REJECT: # reject is not supported by the nsx router firewall LOG.warning("Reject action is not supported by the NSX backend " "for router firewall. Using %(action)s instead for " "rule %(id)s", {'action': consts.FW_ACTION_DROP, 'id': fwaas_rule_id}) return consts.FW_ACTION_DROP # Unexpected action LOG.error("Unsupported FWAAS action %(action)s for rule %(id)s", { 'action': fwaas_action, 'id': fwaas_rule_id}) raise self.driver_exception(driver=self.driver_name) def _translate_cidr(self, cidr): return self.nsx_firewall.get_ip_cidr_reference( cidr, consts.IPV6 if netaddr.valid_ipv6(cidr) else consts.IPV4) def translate_addresses_to_target(self, cidrs): return [self._translate_cidr(ip) for ip in cidrs] @staticmethod def _translate_protocol(fwaas_protocol): """Translate FWaaS L4 protocol to NSX protocol""" if fwaas_protocol.lower() == 'tcp': return consts.TCP if fwaas_protocol.lower() == 'udp': return consts.UDP if fwaas_protocol.lower() == 'icmp': # This will cover icmpv6 too, when adding the rule. 
return consts.ICMPV4 @staticmethod def _translate_ports(ports): return [ports.replace(':', '-')] def _translate_services(self, fwaas_rule): l4_protocol = self._translate_protocol(fwaas_rule['protocol']) if l4_protocol in [consts.TCP, consts.UDP]: source_ports = [] destination_ports = [] if fwaas_rule.get('source_port'): source_ports = self._translate_ports( fwaas_rule['source_port']) if fwaas_rule.get('destination_port'): destination_ports = self._translate_ports( fwaas_rule['destination_port']) return [self.nsx_firewall.get_nsservice( consts.L4_PORT_SET_NSSERVICE, l4_protocol=l4_protocol, source_ports=source_ports, destination_ports=destination_ports)] elif l4_protocol == consts.ICMPV4: # Add both icmp v4 & v6 services return [ self.nsx_firewall.get_nsservice( consts.ICMP_TYPE_NSSERVICE, protocol=consts.ICMPV4), self.nsx_firewall.get_nsservice( consts.ICMP_TYPE_NSSERVICE, protocol=consts.ICMPV6), ] def _translate_rules(self, fwaas_rules, replace_src=None, replace_dest=None, logged=False): translated_rules = [] for rule in fwaas_rules: nsx_rule = {} if not rule['enabled']: # skip disabled rules continue # Make sure the rule has a name, and it starts with the prefix # (backend max name length is 255) if rule.get('name'): name = RULE_NAME_PREFIX + rule['name'] else: name = RULE_NAME_PREFIX + rule['id'] nsx_rule['display_name'] = name[:255] if rule.get('description'): nsx_rule['notes'] = rule['description'] nsx_rule['action'] = self._translate_action( rule['action'], rule['id']) if replace_dest: # set this value as the destination logical switch, # and set the rule to ingress nsx_rule['destinations'] = [{'target_type': 'LogicalSwitch', 'target_id': replace_dest}] nsx_rule['direction'] = 'IN' elif rule.get('destination_ip_address'): nsx_rule['destinations'] = self.translate_addresses_to_target( [rule['destination_ip_address']]) if replace_src: # set this value as the source logical switch, # and set the rule to egress nsx_rule['sources'] = [{'target_type': 
'LogicalSwitch', 'target_id': replace_src}] nsx_rule['direction'] = 'OUT' elif rule.get('source_ip_address'): nsx_rule['sources'] = self.translate_addresses_to_target( [rule['source_ip_address']]) if rule.get('protocol'): nsx_rule['services'] = self._translate_services(rule) if logged: nsx_rule['logged'] = logged translated_rules.append(nsx_rule) return translated_rules def validate_backend_version(self): # prevent firewall actions if the backend does not support it if not self.backend_support: LOG.error("The NSX backend does not support router firewall") raise self.driver_exception(driver=self.driver_name) def get_default_backend_rule(self, section_id, allow_all=True): # Add default allow all rule old_default_rule = self.nsx_firewall.get_default_rule( section_id) return { 'display_name': DEFAULT_RULE_NAME, 'action': (consts.FW_ACTION_ALLOW if allow_all else consts.FW_ACTION_DROP), 'is_default': True, 'id': old_default_rule['id'] if old_default_rule else 0} vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_v3/__init__.py0000666000175100017510000000000013244523345024371 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_v3/edge_fwaas_driver_v1.py0000666000175100017510000001106413244523345026714 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import context as n_context from oslo_log import helpers as log_helpers from oslo_log import log as logging from neutron_lib.exceptions import firewall_v1 as exceptions from vmware_nsx.services.fwaas.nsx_v3 import edge_fwaas_driver_base as \ base_driver LOG = logging.getLogger(__name__) FWAAS_DRIVER_NAME = 'Fwaas V1 NSX-V3 driver' NSX_FW_TAG = 'os-neutron-fw-id' class EdgeFwaasV3DriverV1(base_driver.CommonEdgeFwaasV3Driver): """NSX-V3 driver for Firewall As A Service - V1.""" def __init__(self): exception_cls = exceptions.FirewallInternalDriverError super(EdgeFwaasV3DriverV1, self).__init__(exception_cls, FWAAS_DRIVER_NAME) @log_helpers.log_method_call def create_firewall(self, agent_mode, apply_list, firewall): """Create the Firewall with a given policy. """ self._update_backend_routers(apply_list, firewall['id']) @log_helpers.log_method_call def update_firewall(self, agent_mode, apply_list, firewall): """Remove previous policy and apply the new policy.""" self._update_backend_routers(apply_list, firewall['id']) @log_helpers.log_method_call def delete_firewall(self, agent_mode, apply_list, firewall): """Delete firewall. Removes rules created by this instance from the backend firewall And add the default allow rule. """ self._update_backend_routers(apply_list, firewall['id']) @log_helpers.log_method_call def apply_default_policy(self, agent_mode, apply_list, firewall): """Apply the default policy (deny all). The backend firewall always has this policy (=deny all) as default, so we only need to delete the current rules. 
""" self._update_backend_routers(apply_list, firewall['id']) def _update_backend_routers(self, apply_list, fw_id): """"Update each router on the backend using the core plugin code""" self.validate_backend_version() context = n_context.get_admin_context() for router_info in apply_list: # Skip unsupported routers if not self.should_apply_firewall_to_router(router_info.router): continue self.core_plugin.update_router_firewall( context, router_info.router_id) def update_nsx_router_tags(self, nsx_router_id, fw_id=None): """Update the backend router with tags marking the attached fw id""" # Get the current tags nsx_router = self.nsx_router.get(nsx_router_id) if 'tags' not in nsx_router: nsx_router['tags'] = [] tags = nsx_router['tags'] # Look for the firewall tag and update/remove it update_tags = False found_tag = False for tag in tags: if tag.get('scope') == NSX_FW_TAG: found_tag = True if not fw_id: tags.remove(tag) update_tags = True break if fw_id != tag.get('tag'): tag['tag'] = fw_id update_tags = True break # Add the tag if not found if fw_id and not found_tag: tags.append({'scope': NSX_FW_TAG, 'tag': fw_id}) update_tags = True # update tags on the backend router if update_tags: self.nsx_router.update(nsx_router_id, tags=tags) def get_router_translated_rules(self, router_id, firewall): """Return the list of translated rules The default drop all will be added later """ # Return the firewall rules only if the fw is up if firewall['admin_state_up']: # TODO(asarfaty): get this value from the firewall extensions logged = False return self._translate_rules(firewall['firewall_rule_list'], logged=logged) return [] vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_v3/edge_fwaas_driver_v2.py0000666000175100017510000001351113244523345026714 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import context as n_context from oslo_log import helpers as log_helpers from oslo_log import log as logging from neutron_lib.exceptions import firewall_v2 as exceptions from vmware_nsx.services.fwaas.nsx_v3 import edge_fwaas_driver_base \ as base_driver from vmware_nsxlib.v3 import nsx_constants as consts LOG = logging.getLogger(__name__) FWAAS_DRIVER_NAME = 'Fwaas V2 NSX-V3 driver' class EdgeFwaasV3DriverV2(base_driver.CommonEdgeFwaasV3Driver): """NSX-V3 driver for Firewall As A Service - V2.""" def __init__(self): exception_cls = exceptions.FirewallInternalDriverError super(EdgeFwaasV3DriverV2, self).__init__(exception_cls, FWAAS_DRIVER_NAME) @log_helpers.log_method_call def create_firewall_group(self, agent_mode, apply_list, firewall_group): """Create the Firewall with a given policy. """ self._validate_firewall_group(firewall_group) self._update_backend_routers(apply_list, firewall_group['id']) @log_helpers.log_method_call def update_firewall_group(self, agent_mode, apply_list, firewall_group): """Remove previous policy and apply the new policy.""" self._validate_firewall_group(firewall_group) self._update_backend_routers(apply_list, firewall_group['id']) @log_helpers.log_method_call def delete_firewall_group(self, agent_mode, apply_list, firewall_group): """Delete firewall. Removes rules created by this instance from the backend firewall And add the default allow rule. 
""" self._update_backend_routers(apply_list, firewall_group['id']) @log_helpers.log_method_call def apply_default_policy(self, agent_mode, apply_list, firewall_group): """Apply the default policy (deny all). The backend firewall always has this policy (=deny all) as default, so we only need to delete the current rules. """ self._update_backend_routers(apply_list, firewall_group['id']) def _update_backend_routers(self, apply_list, fwg_id): """Update all the affected router on the backend""" self.validate_backend_version() LOG.info("Updating routers firewall for firewall group %s", fwg_id) context = n_context.get_admin_context() routers = set() # the apply_list is a list of tuples: routerInfo, port-id for router_info, port_id in apply_list: # Skip unsupported routers if not self.should_apply_firewall_to_router(router_info.router): continue routers.add(router_info.router_id) # update each router once for router_id in routers: self.core_plugin.update_router_firewall(context, router_id) def get_port_translated_rules(self, nsx_ls_id, firewall_group, plugin_rules): """Return the list of translated rules per port""" port_rules = [] # TODO(asarfaty): get this value from the firewall group extensions logged = False # Add the firewall group ingress/egress rules only if the fw is up if firewall_group['admin_state_up']: port_rules.extend(self._translate_rules( firewall_group['ingress_rule_list'], replace_dest=nsx_ls_id, logged=logged)) port_rules.extend(self._translate_rules( firewall_group['egress_rule_list'], replace_src=nsx_ls_id, logged=logged)) # Add the per-port plugin rules if plugin_rules and isinstance(plugin_rules, list): port_rules.extend(plugin_rules) # Add ingress/egress block rules for this port port_rules.extend([ {'display_name': "Block port ingress", 'action': consts.FW_ACTION_DROP, 'destinations': [{'target_type': 'LogicalSwitch', 'target_id': nsx_ls_id}], 'direction': 'IN'}, {'display_name': "Block port egress", 'action': consts.FW_ACTION_DROP, 'sources': 
[{'target_type': 'LogicalSwitch', 'target_id': nsx_ls_id}], 'direction': 'OUT'}]) return port_rules def _validate_firewall_group(self, firewall_group): """Validate the rules in the firewall group""" for rule in firewall_group['egress_rule_list']: if rule.get('source_ip_address'): # this rule cannot be used as egress rule LOG.error("Rule %(id)s cannot be used in an egress " "policy because it has a source", {'id': rule['id']}) raise exceptions.FirewallInternalDriverError( driver=FWAAS_DRIVER_NAME) for rule in firewall_group['ingress_rule_list']: if rule.get('destination_ip_address'): # this rule cannot be used as ingress rule LOG.error("Rule %(id)s cannot be used in an ingress " "policy because it has a destination", {'id': rule['id']}) raise exceptions.FirewallInternalDriverError( driver=FWAAS_DRIVER_NAME) vmware-nsx-12.0.1/vmware_nsx/services/fwaas/__init__.py0000666000175100017510000000000013244523345023151 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/fwaas/common/0000775000175100017510000000000013244524600022333 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/fwaas/common/fwaas_callbacks_v2.py0000666000175100017510000001716513244523345026435 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging from neutron.agent.l3 import router_info from neutron.common import config as neutron_config # noqa from neutron_fwaas.db.firewall.v2 import firewall_db_v2 from neutron_fwaas.services.firewall.agents.l3reference \ import firewall_l3_agent_v2 from neutron_lib import constants as nl_constants from neutron_lib import context as n_context from neutron_lib.plugins import directory LOG = logging.getLogger(__name__) class DummyAgentApi(object): def is_router_in_namespace(self, router_id): return True class NsxFwaasCallbacksV2(firewall_l3_agent_v2.L3WithFWaaS): """Common NSX RPC callbacks for Firewall As A Service - V2.""" def __init__(self): # The super code needs a configuration object with the neutron host # and an agent_mode, which our driver doesn't use. neutron_conf = cfg.CONF neutron_conf.agent_mode = 'nsx' super(NsxFwaasCallbacksV2, self).__init__(conf=neutron_conf) self.agent_api = DummyAgentApi() self._core_plugin = None @property def plugin_type(self): pass @property def core_plugin(self): """Get the NSX-V3 core plugin""" if not self._core_plugin: self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): # get the plugin that match this driver self._core_plugin = self._core_plugin.get_plugin_by_type( self.plugin_type) return self._core_plugin # Override functions using the agent_api that is not used by our plugin def _get_firewall_group_ports(self, context, firewall_group, to_delete=False, require_new_plugin=False): """Returns in-namespace ports, either from firewall group dict if newer version of plugin or from project routers otherwise. NOTE: Vernacular move from "tenant" to "project" doesn't yet appear as a key in router or firewall group objects. 
""" fwg_port_ids = [] if self._has_port_insertion_fields(firewall_group): if to_delete: fwg_port_ids = firewall_group['del-port-ids'] else: fwg_port_ids = firewall_group['add-port-ids'] elif not require_new_plugin: routers = self._get_routers_in_project( context, firewall_group['tenant_id']) for router in routers: if router.router['tenant_id'] == firewall_group['tenant_id']: fwg_port_ids.extend([p['id'] for p in router.internal_ports]) # Return in-namespace port objects. ports = self._get_in_ns_ports(fwg_port_ids, ignore_errors=to_delete) # On illegal ports - change FW status to Error if ports is None: self.fwplugin_rpc.set_firewall_group_status( context, firewall_group['id'], nl_constants.ERROR) return ports def _get_in_ns_ports(self, port_ids, ignore_errors=False): """Returns port objects in the local namespace, along with their router_info. """ context = n_context.get_admin_context() in_ns_ports = {} # This will be converted to a list later. for port_id in port_ids: # find the router of this port: port = self.core_plugin.get_port(context, port_id) # verify that this is a router interface port if port['device_owner'] != nl_constants.DEVICE_OWNER_ROUTER_INTF: if not ignore_errors: LOG.error("NSX-V3 FWaaS V2 plugin does not support %s " "ports", port['device_owner']) return else: router_id = port['device_id'] router = self.core_plugin.get_router(context, router_id) router_info = self._router_dict_to_obj(router) if router_info: if router_info in in_ns_ports: in_ns_ports[router_info].append(port_id) else: in_ns_ports[router_info] = [port_id] return list(in_ns_ports.items()) def delete_firewall_group(self, context, firewall_group, host): """Handles RPC from plugin to delete a firewall group. This method is overridden here in order to handle routers in Error state without ports, and make sure those are deleted. 
""" ports_for_fwg = self._get_firewall_group_ports( context, firewall_group, to_delete=True) if not ports_for_fwg: # FW without ports should be deleted without calling the driver self.fwplugin_rpc.firewall_group_deleted( context, firewall_group['id']) return return super(NsxFwaasCallbacksV2, self).delete_firewall_group( context, firewall_group, host) def _get_routers_in_project(self, context, project_id): return self.core_plugin.get_routers( context, filters={'project_id': [project_id]}) def _router_dict_to_obj(self, r): # The callbacks expect a router-info object return router_info.RouterInfo( None, r['id'], router=r, agent_conf=None, interface_driver=None, use_ipv6=False) def get_port_fwg(self, context, port_id): """Return the firewall group of this port if the FWaaS rules should be added to the backend router. """ if not self.fwaas_enabled: return False ctx = context.elevated() fwg_id = self._get_port_firewall_group_id(ctx, port_id) if fwg_id is None: # No FWaas Firewall was assigned to this port return # check the state of this firewall group fwg = self._get_fw_group_from_plugin(ctx, fwg_id) if fwg is not None: if fwg.get('status') in (nl_constants.ERROR, nl_constants.PENDING_DELETE): # Do not add rules of firewalls with errors LOG.warning("Port %(port)s will not get rules from firewall " "group %(fwg)s which is in %(status)s", {'port': port_id, 'fwg': fwg_id, 'status': fwg['status']}) return return fwg def _get_fw_group_from_plugin(self, context, fwg_id): # NOTE(asarfaty): currently there is no api to get a specific firewall fwg_list = self.fwplugin_rpc.get_firewall_groups_for_project(context) for fwg in fwg_list: if fwg['id'] == fwg_id: return fwg # TODO(asarfaty): add this api to fwaas firewall_db_v2 def _get_port_firewall_group_id(self, context, port_id): entry = context.session.query( firewall_db_v2.FirewallGroupPortAssociation).filter_by( port_id=port_id).first() if entry: return entry.firewall_group_id 
vmware-nsx-12.0.1/vmware_nsx/services/fwaas/common/fwaas_callbacks_v1.py0000666000175100017510000001401013244523345026416 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from neutron.agent.l3 import router_info from neutron.common import config as neutron_config # noqa from neutron_fwaas.db.firewall import firewall_db # noqa from neutron_fwaas.db.firewall import firewall_router_insertion_db \ as fw_r_ins_db from neutron_fwaas.services.firewall.agents.l3reference \ import firewall_l3_agent from neutron_lib import constants as nl_constants from neutron_lib import context as n_context from neutron_lib.plugins import directory LOG = logging.getLogger(__name__) class NsxFwaasCallbacks(firewall_l3_agent.L3WithFWaaS): """Common NSX RPC callbacks for Firewall As A Service - V1.""" def __init__(self): # The super code needs a configuration object with the neutron host # and an agent_mode, which our driver doesn't use. 
neutron_conf = cfg.CONF neutron_conf.agent_mode = 'nsx' super(NsxFwaasCallbacks, self).__init__(conf=neutron_conf) self._core_plugin = None @property def plugin_type(self): pass @property def core_plugin(self): """Get the NSX-V3 core plugin""" if not self._core_plugin: self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): # get the plugin that match this driver self._core_plugin = self._core_plugin.get_plugin_by_type( self.plugin_type) return self._core_plugin # Override functions using the agent_api that is not used by our plugin def _get_router_ids_for_fw(self, context, fw, to_delete=False): """Return the router_ids either from fw dict or tenant routers.""" routers_in_proj = self._get_routers_in_project( context, fw['tenant_id']) if self._has_router_insertion_fields(fw): # it is a new version of plugin (supports specific routers) ids = (fw['del-router-ids'] if to_delete else fw['add-router-ids']) project_ids = [router['id'] for router in routers_in_proj if router['id'] in ids] if len(project_ids) < len(ids): # This means that there is a router from another project. 
LOG.error("Failed to attach routers from a different project " "to firewall %(fw)s: %(routers)s", {'fw': fw['id'], 'routers': list(set(ids) - set(project_ids))}) self.fwplugin_rpc.set_firewall_status( context, fw['id'], nl_constants.ERROR) return ids else: return [router['id'] for router in routers_in_proj] def _get_routers_in_project(self, context, project_id): return self.core_plugin.get_routers( context, filters={'project_id': [project_id]}) def _router_dict_to_obj(self, r): # The callbacks expect a router-info object return router_info.RouterInfo( None, r['id'], router=r, agent_conf=None, interface_driver=None, use_ipv6=False) def _get_router_info_list_for_tenant(self, router_ids, tenant_id): """Returns the list of router info objects on which to apply the fw.""" context = n_context.get_admin_context() tenant_routers = self._get_routers_in_project(context, tenant_id) return [self._router_dict_to_obj(ri) for ri in tenant_routers if ri['id'] in router_ids] def should_apply_firewall_to_router(self, context, router_id): """Return True if the FWaaS rules should be added to this router.""" if not self.fwaas_enabled: return False ctx = context.elevated() fw_id = self._get_router_firewall_id(ctx, router_id) if fw_id is None: # No FWaas Firewall was assigned to this router return False # check the state of this firewall firewall = self._get_fw_from_plugin(ctx, fw_id) if firewall is not None: if firewall.get('status') in (nl_constants.ERROR, nl_constants.PENDING_DELETE): # Do not add rules of firewalls with errors LOG.warning("Router %(rtr)s will not get rules from firewall " "%(fw)s which is in %(status)s", {'rtr': router_id, 'fw': fw_id, 'status': firewall['status']}) return False return True # TODO(asarfaty): add this api to fwaas firewall-router-insertion-db def _get_router_firewall_id(self, context, router_id): entry = context.session.query( fw_r_ins_db.FirewallRouterAssociation).filter_by( router_id=router_id).first() if entry: return entry.fw_id def 
_get_fw_from_plugin(self, context, fw_id): # NOTE(asarfaty): currently there is no api to get a specific firewall fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(context) for fw in fw_list: if fw['id'] == fw_id: return fw def get_router_firewall(self, context, router_id): ctx_elevated = context.elevated() fw_id = self._get_router_firewall_id(ctx_elevated, router_id) if fw_id: return self._get_fw_from_plugin(ctx_elevated, fw_id) vmware-nsx-12.0.1/vmware_nsx/services/fwaas/common/utils.py0000666000175100017510000000175713244523345024066 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_fwaas.common import fwaas_constants from neutron_lib.plugins import directory def is_fwaas_v1_plugin_enabled(): fwaas_plugin = directory.get_plugin(fwaas_constants.FIREWALL) if fwaas_plugin: return True def is_fwaas_v2_plugin_enabled(): fwaas_plugin = directory.get_plugin(fwaas_constants.FIREWALL_V2) if fwaas_plugin: return True vmware-nsx-12.0.1/vmware_nsx/services/fwaas/common/__init__.py0000666000175100017510000000000013244523345024441 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_tv/0000775000175100017510000000000013244524600022364 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_tv/plugin_v2.py0000666000175100017510000000210613244523345024651 0ustar zuulzuul00000000000000# Copyright 2018 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_fwaas.services.firewall import fwaas_plugin_v2 from vmware_nsx.plugins.nsx import utils as tvd_utils @tvd_utils.filter_plugins class FwaasTVPluginV2(fwaas_plugin_v2.FirewallPluginV2): """NSX-TV plugin for Firewall As A Service - V2. This plugin adds separation between T/V instances """ methods_to_separate = ['get_firewall_groups', 'get_firewall_policies', 'get_firewall_rules'] vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_tv/plugin_v1.py0000666000175100017510000000360613244523345024656 0ustar zuulzuul00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_fwaas.services.firewall import fwaas_plugin from vmware_nsx.plugins.nsx import utils as tvd_utils @tvd_utils.filter_plugins class FwaasTVPluginV1(fwaas_plugin.FirewallPlugin): """NSX-TV plugin for Firewall As A Service - V1. This plugin adds separation between T/V instances """ methods_to_separate = ['get_firewalls', 'get_firewall_policies', 'get_firewall_rules'] def validate_firewall_routers_not_in_use( self, context, router_ids, fwid=None): # Override this method to verify that the router & firewall belongs to # the same plugin context_plugin_type = tvd_utils.get_tvd_plugin_type_for_project( context.project_id, context) core_plugin = directory.get_plugin() for rtr_id in router_ids: rtr_plugin = core_plugin._get_plugin_from_router_id( context, rtr_id) if rtr_plugin.plugin_type() != context_plugin_type: err_msg = (_('Router should belong to the %s plugin ' 'as the firewall') % context_plugin_type) raise n_exc.InvalidInput(error_message=err_msg) vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_tv/__init__.py0000666000175100017510000000000013244523345024472 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_tv/edge_fwaas_driver_v1.py0000666000175100017510000000736413244523345027025 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import helpers as log_helpers from oslo_log import log as logging from neutron_fwaas.services.firewall.drivers import fwaas_base from neutron_lib.exceptions import firewall_v1 as exceptions from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.services.fwaas.nsx_v import edge_fwaas_driver as v_driver from vmware_nsx.services.fwaas.nsx_v3 import edge_fwaas_driver_v1 as t_driver LOG = logging.getLogger(__name__) FWAAS_DRIVER_NAME = 'FwaaS V1 NSX-TV driver' class EdgeFwaasTVDriverV1(fwaas_base.FwaasDriverBase): """NSX-TV driver for Firewall As A Service - V1. This driver is just a wrapper calling the relevant nsx-v/t driver """ def __init__(self): super(EdgeFwaasTVDriverV1, self).__init__() self.driver_name = FWAAS_DRIVER_NAME # supported drivers: self.drivers = {} try: self.drivers[projectpluginmap.NsxPlugins.NSX_T] = ( t_driver.EdgeFwaasV3DriverV1()) except Exception: LOG.warning("EdgeFwaasTVDriverV1 failed to initialize the NSX-T " "driver") self.drivers[projectpluginmap.NsxPlugins.NSX_T] = None try: self.drivers[projectpluginmap.NsxPlugins.NSX_V] = ( v_driver.EdgeFwaasDriver()) except Exception: LOG.warning("EdgeFwaasTVDriverV1 failed to initialize the NSX-V " "driver") self.drivers[projectpluginmap.NsxPlugins.NSX_V] = None def get_T_driver(self): return self.drivers[projectpluginmap.NsxPlugins.NSX_T] def get_V_driver(self): return self.drivers[projectpluginmap.NsxPlugins.NSX_V] def _get_driver_for_project(self, project): plugin_type = tvd_utils.get_tvd_plugin_type_for_project(project) if not self.drivers.get(plugin_type): LOG.error("Project %(project)s with plugin %(plugin)s has no " "support for FWaaS V1", {'project': project, 'plugin': plugin_type}) raise exceptions.FirewallInternalDriverError( driver=self.driver_name) return self.drivers[plugin_type] @log_helpers.log_method_call def create_firewall(self, agent_mode, apply_list, firewall): d = 
self._get_driver_for_project(firewall['tenant_id']) return d.create_firewall(agent_mode, apply_list, firewall) @log_helpers.log_method_call def update_firewall(self, agent_mode, apply_list, firewall): d = self._get_driver_for_project(firewall['tenant_id']) return d.update_firewall(agent_mode, apply_list, firewall) @log_helpers.log_method_call def delete_firewall(self, agent_mode, apply_list, firewall): d = self._get_driver_for_project(firewall['tenant_id']) return d.delete_firewall(agent_mode, apply_list, firewall) @log_helpers.log_method_call def apply_default_policy(self, agent_mode, apply_list, firewall): d = self._get_driver_for_project(firewall['tenant_id']) return d.apply_default_policy(agent_mode, apply_list, firewall) vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_tv/edge_fwaas_driver_v2.py0000666000175100017510000000660613244523345027024 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import helpers as log_helpers from oslo_log import log as logging from neutron_fwaas.services.firewall.drivers import fwaas_base_v2 from neutron_lib.exceptions import firewall_v2 as exceptions from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.services.fwaas.nsx_v3 import edge_fwaas_driver_v2 as t_driver LOG = logging.getLogger(__name__) FWAAS_DRIVER_NAME = 'FwaaS V2 NSX-TV driver' class EdgeFwaasTVDriverV2(fwaas_base_v2.FwaasDriverBase): """NSX-TV driver for Firewall As A Service - V2. This driver is just a wrapper calling the relevant nsx-v3 driver """ def __init__(self): super(EdgeFwaasTVDriverV2, self).__init__() self.driver_name = FWAAS_DRIVER_NAME # supported drivers (Only NSX-T): self.drivers = {} try: self.drivers[projectpluginmap.NsxPlugins.NSX_T] = ( t_driver.EdgeFwaasV3DriverV2()) except Exception: LOG.warning("EdgeFwaasTVDriverV2 failed to initialize the NSX-T " "driver") self.drivers[projectpluginmap.NsxPlugins.NSX_T] = None def get_T_driver(self): return self.drivers[projectpluginmap.NsxPlugins.NSX_T] def _get_driver_for_project(self, project): plugin_type = tvd_utils.get_tvd_plugin_type_for_project(project) if not self.drivers.get(plugin_type): LOG.error("Project %(project)s with plugin %(plugin)s has no " "support for FWaaS V2", {'project': project, 'plugin': plugin_type}) raise exceptions.FirewallInternalDriverError( driver=self.driver_name) return self.drivers[plugin_type] @log_helpers.log_method_call def create_firewall_group(self, agent_mode, apply_list, firewall_group): d = self._get_driver_for_project(firewall_group['tenant_id']) return d.create_firewall_group(agent_mode, apply_list, firewall_group) @log_helpers.log_method_call def update_firewall_group(self, agent_mode, apply_list, firewall_group): d = self._get_driver_for_project(firewall_group['tenant_id']) return d.update_firewall_group(agent_mode, apply_list, firewall_group) @log_helpers.log_method_call 
def delete_firewall_group(self, agent_mode, apply_list, firewall_group): d = self._get_driver_for_project(firewall_group['tenant_id']) return d.delete_firewall_group(agent_mode, apply_list, firewall_group) @log_helpers.log_method_call def apply_default_policy(self, agent_mode, apply_list, firewall_group): d = self._get_driver_for_project(firewall_group['tenant_id']) return d.apply_default_policy(agent_mode, apply_list, firewall_group) vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_v/0000775000175100017510000000000013244524600022200 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_v/fwaas_callbacks.py0000666000175100017510000000636513244523345025673 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.fwaas.common import fwaas_callbacks_v1 as com_clbcks from vmware_nsx.services.fwaas.nsx_tv import edge_fwaas_driver_v1 as tv_driver LOG = logging.getLogger(__name__) class NsxvFwaasCallbacks(com_clbcks.NsxFwaasCallbacks): """NSX-V RPC callbacks for Firewall As A Service - V1.""" def __init__(self): super(NsxvFwaasCallbacks, self).__init__() # update the fwaas driver in case of TV plugin if self.fwaas_enabled: if self.fwaas_driver.driver_name == tv_driver.FWAAS_DRIVER_NAME: self.internal_driver = self.fwaas_driver.get_V_driver() else: self.internal_driver = self.fwaas_driver @property def plugin_type(self): return projectpluginmap.NsxPlugins.NSX_V def should_apply_firewall_to_router(self, context, router, router_id): """Return True if the FWaaS rules should be added to this router.""" # in case of a distributed-router: # router['id'] is the id of the neutron router (=tlr) # and router_id is the plr/tlr (the one that is being updated) if not super(NsxvFwaasCallbacks, self).should_apply_firewall_to_router( context, router['id']): return False # get all the relevant router info # ("router" does not have all the fields) ctx_elevated = context.elevated() router_data = self.core_plugin.get_router(ctx_elevated, router['id']) if not router_data: LOG.error("Couldn't read router %s data", router['id']) return False if router_data.get('distributed'): if router_id == router['id']: # Do not add firewall rules on the tlr router. 
return False # Check if the FWaaS driver supports this router if not self.internal_driver.should_apply_firewall_to_router( router_data, raise_exception=False): return False return True def get_fwaas_rules_for_router(self, context, router_id): """Return the list of (translated) FWaaS rules for this router.""" ctx_elevated = context.elevated() fw_id = self._get_router_firewall_id(ctx_elevated, router_id) if fw_id: return self._get_fw_applicable_rules(ctx_elevated, fw_id) return [] def _get_fw_applicable_rules(self, context, fw_id): fw = self._get_fw_from_plugin(context, fw_id) if fw is not None and fw['id'] == fw_id: return self.internal_driver.get_firewall_translated_rules(fw) return [] vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_v/__init__.py0000666000175100017510000000000013244523345024306 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/fwaas/nsx_v/edge_fwaas_driver.py0000666000175100017510000002504713244523345026231 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import context as n_context from neutron_lib.exceptions import firewall_v1 as exceptions from neutron_lib.plugins import directory from oslo_log import helpers as log_helpers from oslo_log import log as logging from neutron_fwaas.services.firewall.drivers import fwaas_base from vmware_nsx.common import locking from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx_v.vshield import edge_utils LOG = logging.getLogger(__name__) FWAAS_DRIVER_NAME = 'Fwaas V1 NSX-V driver' RULE_NAME_PREFIX = 'Fwaas-' class EdgeFwaasDriver(fwaas_base.FwaasDriverBase): """NSX-V driver for Firewall As A Service - V1.""" @property def core_plugin(self): if not self._core_plugin: self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) # make sure plugin init was completed if not self._core_plugin.init_is_complete: self._core_plugin.init_complete(None, None, {}) return self._core_plugin @property def edge_manager(self): return self.core_plugin.edge_manager def __init__(self): LOG.debug("Loading FWaaS V1 NsxVDriver.") super(EdgeFwaasDriver, self).__init__() self.driver_name = FWAAS_DRIVER_NAME self._core_plugin = None def should_apply_firewall_to_router(self, router_data, raise_exception=True): """Return True if the firewall rules should be added the router Return False in those cases: - router without an external gateway (rule may be added later when there is a gateway) Raise an exception if the router is unsupported (and raise_exception is True): - shared router (not supported) - md proxy router (not supported) """ if (not router_data.get('distributed') and router_data.get('router_type') == 'shared'): LOG.error("Cannot apply firewall to shared router %s", router_data['id']) if raise_exception: raise exceptions.FirewallInternalDriverError( driver=self.driver_name) return False if router_data.get('name', 
'').startswith('metadata_proxy_router'): LOG.error("Cannot apply firewall to the metadata proxy router %s", router_data['id']) if raise_exception: raise exceptions.FirewallInternalDriverError( driver=self.driver_name) return False if not router_data.get('external_gateway_info'): LOG.info("Cannot apply firewall to router %s with no gateway", router_data['id']) return False return True def _get_routers_edges(self, context, apply_list): # Get edges for all the routers in the apply list. # note that shared routers are currently not supported edge_manager = self.edge_manager edges_map = {} for router_info in apply_list: # No FWaaS rules needed if there is no external gateway if not self.should_apply_firewall_to_router(router_info.router): continue lookup_id = None router_id = router_info.router_id if router_info.router.get('distributed'): # Distributed router # we need the plr edge id lookup_id = edge_manager.get_plr_by_tlr_id( context, router_id) else: # Exclusive router lookup_id = router_id if lookup_id: # look for the edge id in the DB edge_id = edge_utils.get_router_edge_id(context, lookup_id) if edge_id: edges_map[router_id] = {'edge_id': edge_id, 'lookup_id': lookup_id} return edges_map def _translate_rules(self, fwaas_rules, logged=False): translated_rules = [] for rule in fwaas_rules: if not rule['enabled']: # skip disabled rules continue # Make sure the rule has a name, and it starts with the prefix # (backend max name length is 30) if rule.get('name'): rule['name'] = RULE_NAME_PREFIX + rule['name'] else: rule['name'] = RULE_NAME_PREFIX + rule['id'] rule['name'] = rule['name'][:30] # source & destination should be lists if rule.get('destination_ip_address'): rule['destination_ip_address'] = [ rule['destination_ip_address']] if rule.get('source_ip_address'): rule['source_ip_address'] = [rule['source_ip_address']] if logged: rule['logged'] = True translated_rules.append(rule) return translated_rules def _set_rules_on_router_edge(self, context, router_id, 
neutron_id, edge_id, fw_id, translated_rules, delete_fw=False): """Recreate router edge firewall rules Using the plugin code to recreate all the rules with the additional FWaaS rules. router_id is the is of the router about to be updated (in case of distributed router - the plr) neutron_id is the neutron router id """ # update the backend router_db = self.core_plugin._get_router(context, neutron_id) try: with locking.LockManager.get_lock(str(edge_id)): self.core_plugin.update_router_firewall( context, router_id, router_db, fwaas_rules=translated_rules) except Exception as e: # catch known library exceptions and raise Fwaas generic exception LOG.error("Failed to update firewall %(fw)s on edge %(edge_id)s: " "%(e)s", {'e': e, 'fw': fw_id, 'edge_id': edge_id}) raise exceptions.FirewallInternalDriverError( driver=self.driver_name) def _create_or_update_firewall(self, agent_mode, apply_list, firewall): # admin state down means default block rule firewall if not firewall['admin_state_up']: self.apply_default_policy(agent_mode, apply_list, firewall) return # get router-edge mapping context = n_context.get_admin_context() edges_map = self._get_routers_edges(context, apply_list) if not edges_map: routers = [r.router_id for r in apply_list] LOG.warning("Cannot apply the firewall %(fw)s to any of the " "routers %(rtrs)s", {'fw': firewall['id'], 'rtrs': routers}) return # Translate the FWaaS rules # TODO(asarfaty): get this value from the firewall extensions logged = False rules = self._translate_rules(firewall['firewall_rule_list'], logged=logged) # update each relevant edge with the new rules for router_info in apply_list: neutron_id = router_info.router_id info = edges_map.get(neutron_id) if info: self._set_rules_on_router_edge( context, info['lookup_id'], neutron_id, info['edge_id'], firewall['id'], rules) @log_helpers.log_method_call def create_firewall(self, agent_mode, apply_list, firewall): """Create the Firewall with a given policy. 
""" self._create_or_update_firewall(agent_mode, apply_list, firewall) @log_helpers.log_method_call def update_firewall(self, agent_mode, apply_list, firewall): """Remove previous policy and apply the new policy.""" self._create_or_update_firewall(agent_mode, apply_list, firewall) def _delete_firewall_or_set_default_policy(self, apply_list, firewall, delete_fw=False): # get router-edge mapping context = n_context.get_admin_context() edges_map = self._get_routers_edges(context, apply_list) # if the firewall is deleted, rules should be None rules = None if delete_fw else [] # Go over all routers and update them on backend for router_info in apply_list: neutron_id = router_info.router_id info = edges_map.get(neutron_id) if info: self._set_rules_on_router_edge( context, info['lookup_id'], neutron_id, info['edge_id'], firewall['id'], rules, delete_fw=delete_fw) @log_helpers.log_method_call def delete_firewall(self, agent_mode, apply_list, firewall): """Delete firewall. Removes rules created by this instance from the backend firewall And add the default allow-external rule. """ self._delete_firewall_or_set_default_policy(apply_list, firewall, delete_fw=True) @log_helpers.log_method_call def apply_default_policy(self, agent_mode, apply_list, firewall): """Apply the default policy (deny all). The backend firewall always has this policy (=deny all) as default, so we only need to delete the current rules. 
""" self._delete_firewall_or_set_default_policy(apply_list, firewall, delete_fw=False) def get_firewall_translated_rules(self, firewall): if firewall['admin_state_up']: # TODO(asarfaty): get this value from the firewall extensions logged = False return self._translate_rules(firewall['firewall_rule_list'], logged=logged) return [] vmware-nsx-12.0.1/vmware_nsx/services/trunk/0000775000175100017510000000000013244524600021105 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/trunk/nsx_v3/0000775000175100017510000000000013244524600022325 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/trunk/nsx_v3/__init__.py0000666000175100017510000000000013244523345024433 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/trunk/nsx_v3/driver.py0000666000175100017510000002200513244523345024200 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron.services.trunk import constants as trunk_consts from neutron.services.trunk.drivers import base from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from vmware_nsx.common import nsx_constants as nsx_consts from vmware_nsx.common import utils as nsx_utils from vmware_nsx.db import db as nsx_db from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import nsx_constants LOG = logging.getLogger(__name__) SUPPORTED_INTERFACES = ( portbindings.VIF_TYPE_OVS, ) SUPPORTED_SEGMENTATION_TYPES = ( trunk_consts.VLAN, ) class NsxV3TrunkHandler(object): """Class to handle trunk events.""" def __init__(self, plugin_driver): self.plugin_driver = plugin_driver @property def _nsxlib(self): return self.plugin_driver.nsxlib def _build_switching_profile_ids(self, profiles): switching_profile = self._nsxlib.switching_profile return switching_profile.build_switch_profile_ids( switching_profile.client, *profiles) def _update_port_at_backend(self, context, parent_port_id, subport): # Retrieve the child port details child_port = self.plugin_driver.get_port(context, subport.port_id) # Retrieve the logical port ID based on the child port's neutron ID nsx_child_port_id = nsx_db.get_nsx_switch_and_port_id( session=context.session, neutron_id=subport.port_id)[1] # Retrieve child logical port from the backend try: nsx_child_port = self._nsxlib.logical_port.get( nsx_child_port_id) except nsxlib_exc.ResourceNotFound: with excutils.save_and_reraise_exception(): LOG.error("Child port %s not found on the backend. 
" "Setting trunk status to ERROR.", nsx_child_port_id) # Build address bindings and switch profiles otherwise backend will # clear that information during port update address_bindings = self.plugin_driver._build_address_bindings( child_port) switching_profile_ids = self._build_switching_profile_ids( nsx_child_port.get('switching_profile_ids', [])) seg_id = None tags_update = [] attachment_type = nsx_constants.ATTACHMENT_VIF if parent_port_id: # Set properties for VLAN trunking if subport.segmentation_type == nsx_utils.NsxV3NetworkTypes.VLAN: seg_id = subport.segmentation_id tags_update.append({'scope': 'os-neutron-trunk-id', 'tag': subport.trunk_id}) vif_type = nsx_constants.VIF_TYPE_CHILD else: # Unset the parent port properties from child port seg_id = None vif_type = None tags_update.append({'scope': 'os-neutron-trunk-id', 'tag': None}) # Update logical port in the backend to set/unset parent port try: self._nsxlib.logical_port.update( lport_id=nsx_child_port.get('id'), vif_uuid=subport.port_id, name=nsx_child_port.get('display_name'), admin_state=nsx_child_port.get('admin_state'), address_bindings=address_bindings, switch_profile_ids=switching_profile_ids, attachment_type=attachment_type, parent_vif_id=parent_port_id, vif_type=vif_type, traffic_tag=seg_id, tags_update=tags_update) except nsxlib_exc.ManagerError as e: with excutils.save_and_reraise_exception(): LOG.error("Unable to update subport for attachment " "type. Setting trunk status to ERROR. " "Exception is %s", e) def _set_subports(self, context, parent_port_id, subports): for subport in subports: # Update port with parent port for backend. 
self._update_port_at_backend(context, parent_port_id, subport) def _unset_subports(self, context, subports): for subport in subports: # Update port and remove parent port attachment in the backend self._update_port_at_backend( context=context, parent_port_id=None, subport=subport) def trunk_created(self, context, trunk): # Retrieve the logical port ID based on the parent port's neutron ID nsx_parent_port_id = nsx_db.get_nsx_switch_and_port_id( session=context.session, neutron_id=trunk.port_id)[1] tags_update = [{'scope': 'os-neutron-trunk-id', 'tag': trunk.id}] self.plugin_driver.nsxlib.logical_port.update( nsx_parent_port_id, vif_uuid=trunk.port_id, vif_type=nsx_constants.VIF_TYPE_PARENT, tags_update=tags_update) try: if trunk.sub_ports: self._set_subports(context, trunk.port_id, trunk.sub_ports) trunk.update(status=trunk_consts.ACTIVE_STATUS) except (nsxlib_exc.ManagerError, nsxlib_exc.ResourceNotFound): trunk.update(status=trunk_consts.ERROR_STATUS) def trunk_deleted(self, context, trunk): # Retrieve the logical port ID based on the parent port's neutron ID nsx_parent_port_id = nsx_db.get_nsx_switch_and_port_id( session=context.session, neutron_id=trunk.port_id)[1] tags_update = [{'scope': 'os-neutron-trunk-id', 'tag': None}] self.plugin_driver.nsxlib.logical_port.update( nsx_parent_port_id, vif_uuid=trunk.port_id, vif_type=None, tags_update=tags_update) self._unset_subports(context, trunk.sub_ports) def subports_added(self, context, trunk, subports): try: self._set_subports(context, trunk.port_id, subports) trunk.update(status=trunk_consts.ACTIVE_STATUS) except (nsxlib_exc.ManagerError, nsxlib_exc.ResourceNotFound): trunk.update(status=trunk_consts.ERROR_STATUS) def subports_deleted(self, context, trunk, subports): try: self._unset_subports(context, subports) except (nsxlib_exc.ManagerError, nsxlib_exc.ResourceNotFound): trunk.update(status=trunk_consts.ERROR_STATUS) def trunk_event(self, resource, event, trunk_plugin, payload): if event == events.AFTER_CREATE: 
self.trunk_created(payload.context, payload.current_trunk) elif event == events.AFTER_DELETE: self.trunk_deleted(payload.context, payload.original_trunk) def subport_event(self, resource, event, trunk_plugin, payload): if event == events.AFTER_CREATE: self.subports_added( payload.context, payload.original_trunk, payload.subports) elif event == events.AFTER_DELETE: self.subports_deleted( payload.context, payload.original_trunk, payload.subports) class NsxV3TrunkDriver(base.DriverBase): """Driver to implement neutron's trunk extensions.""" @property def is_loaded(self): try: return nsx_consts.VMWARE_NSX_V3_PLUGIN_NAME == cfg.CONF.core_plugin except cfg.NoSuchOptError: return False @classmethod def create(cls, plugin_driver): cls.plugin_driver = plugin_driver return cls(nsx_consts.VMWARE_NSX_V3_PLUGIN_NAME, SUPPORTED_INTERFACES, SUPPORTED_SEGMENTATION_TYPES, agent_type=None, can_trunk_bound_port=True) @registry.receives(trunk_consts.TRUNK_PLUGIN, [events.AFTER_INIT]) def register(self, resource, event, trigger, payload=None): super(NsxV3TrunkDriver, self).register( resource, event, trigger, payload=payload) self._handler = NsxV3TrunkHandler(self.plugin_driver) for event in (events.AFTER_CREATE, events.AFTER_DELETE): registry.subscribe(self._handler.trunk_event, trunk_consts.TRUNK, event) registry.subscribe(self._handler.subport_event, trunk_consts.SUBPORTS, event) LOG.debug("VMware NSXv3 trunk driver initialized.") vmware-nsx-12.0.1/vmware_nsx/services/trunk/__init__.py0000666000175100017510000000000013244523345023213 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/lbaas/0000775000175100017510000000000013244524600021024 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v3/0000775000175100017510000000000013244524600022244 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v3/listener_mgr.py0000666000175100017510000003303013244523345025316 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v3 import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgeListenerManager(base_mgr.Nsxv3LoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self): super(EdgeListenerManager, self).__init__() def _get_virtual_server_kwargs(self, context, listener, vs_name, tags, app_profile_id, certificate=None): # If loadbalancer vip_port already has floating ip, use floating # IP as the virtual server VIP address. Else, use the loadbalancer # vip_address directly on virtual server. 
filters = {'port_id': [listener.loadbalancer.vip_port_id]} floating_ips = self.core_plugin.get_floatingips(context, filters=filters) if floating_ips: lb_vip_address = floating_ips[0]['floating_ip_address'] else: lb_vip_address = listener.loadbalancer.vip_address kwargs = {'enabled': listener.admin_state_up, 'ip_address': lb_vip_address, 'port': listener.protocol_port, 'application_profile_id': app_profile_id} if vs_name: kwargs['display_name'] = vs_name if tags: kwargs['tags'] = tags if listener.connection_limit != -1: kwargs['max_concurrent_connections'] = \ listener.connection_limit if listener.default_pool_id: pool_binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, listener.loadbalancer.id, listener.default_pool_id) if pool_binding: kwargs['pool_id'] = pool_binding.get('lb_pool_id') ssl_profile_binding = self._get_ssl_profile_binding( tags, certificate=certificate) if (listener.protocol == lb_const.LB_PROTOCOL_TERMINATED_HTTPS and ssl_profile_binding): kwargs.update(ssl_profile_binding) return kwargs def _get_ssl_profile_binding(self, tags, certificate=None): tm_client = self.core_plugin.nsxlib.trust_management if certificate: # First check if NSX already has certificate with same pem. # If so, use that certificate for ssl binding. Otherwise, # create a new certificate on NSX. 
cert_ids = tm_client.find_cert_with_pem( certificate.get_certificate()) if cert_ids: nsx_cert_id = cert_ids[0] else: nsx_cert_id = tm_client.create_cert( certificate.get_certificate(), private_key=certificate.get_private_key(), passphrase=certificate.get_private_key_passphrase(), tags=tags) return { 'client_ssl_profile_binding': { 'ssl_profile_id': self.core_plugin.client_ssl_profile, 'default_certificate_id': nsx_cert_id } } def _get_listener_tags(self, context, listener): tags = lb_utils.get_tags(self.core_plugin, listener.id, lb_const.LB_LISTENER_TYPE, listener.tenant_id, context.project_name) tags.append({'scope': 'os-lbaas-lb-name', 'tag': listener.loadbalancer.name[:utils.MAX_TAG_LEN]}) tags.append({'scope': 'os-lbaas-lb-id', 'tag': listener.loadbalancer_id}) return tags @log_helpers.log_method_call def create(self, context, listener, certificate=None): lb_id = listener.loadbalancer_id load_balancer = self.core_plugin.nsxlib.load_balancer app_client = load_balancer.application_profile vs_client = load_balancer.virtual_server service_client = load_balancer.service vs_name = utils.get_name_and_uuid(listener.name or 'listener', listener.id) tags = self._get_listener_tags(context, listener) if (listener.protocol == lb_const.LB_PROTOCOL_HTTP or listener.protocol == lb_const.LB_PROTOCOL_TERMINATED_HTTPS): profile_type = lb_const.LB_HTTP_PROFILE elif (listener.protocol == lb_const.LB_PROTOCOL_TCP or listener.protocol == lb_const.LB_PROTOCOL_HTTPS): profile_type = lb_const.LB_TCP_PROFILE else: msg = (_('Cannot create listener %(listener)s with ' 'protocol %(protocol)s') % {'listener': listener.id, 'protocol': listener.protocol}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) try: app_profile = app_client.create( display_name=vs_name, resource_type=profile_type, tags=tags) app_profile_id = app_profile['id'] kwargs = self._get_virtual_server_kwargs( context, listener, vs_name, tags, app_profile_id, certificate) virtual_server = vs_client.create(**kwargs) 
except nsxlib_exc.ManagerError: self.lbv2_driver.listener.failed_completion(context, listener) msg = _('Failed to create virtual server at NSX backend') raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) # If there is already lb:lb_service binding, add the virtual # server to the lb service binding = nsx_db.get_nsx_lbaas_loadbalancer_binding( context.session, lb_id) if binding: lb_service_id = binding['lb_service_id'] try: service_client.add_virtual_server(lb_service_id, virtual_server['id']) except nsxlib_exc.ManagerError: self.lbv2_driver.listener.failed_completion(context, listener) msg = _('Failed to add virtual server to lb service ' 'at NSX backend') raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) nsx_db.add_nsx_lbaas_listener_binding( context.session, lb_id, listener.id, app_profile_id, virtual_server['id']) self.lbv2_driver.listener.successful_completion( context, listener) @log_helpers.log_method_call def update(self, context, old_listener, new_listener, certificate=None): vs_client = self.core_plugin.nsxlib.load_balancer.virtual_server app_client = self.core_plugin.nsxlib.load_balancer.application_profile vs_name = None tags = None if new_listener.name != old_listener.name: vs_name = utils.get_name_and_uuid(new_listener.name or 'listener', new_listener.id) tags = self._get_listener_tags(context, new_listener) binding = nsx_db.get_nsx_lbaas_listener_binding( context.session, old_listener.loadbalancer_id, old_listener.id) if not binding: msg = (_('Cannot find listener %(listener)s binding on NSX ' 'backend'), {'listener': old_listener.id}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) try: vs_id = binding['lb_vs_id'] app_profile_id = binding['app_profile_id'] updated_kwargs = self._get_virtual_server_kwargs( context, new_listener, vs_name, tags, app_profile_id, certificate) vs_client.update(vs_id, **updated_kwargs) if vs_name: app_client.update(app_profile_id, display_name=vs_name, tags=tags) 
self.lbv2_driver.listener.successful_completion(context, new_listener) except Exception as e: with excutils.save_and_reraise_exception(): self.lbv2_driver.listener.failed_completion( context, new_listener) LOG.error('Failed to update listener %(listener)s with ' 'error %(error)s', {'listener': old_listener.id, 'error': e}) @log_helpers.log_method_call def delete(self, context, listener): lb_id = listener.loadbalancer_id load_balancer = self.core_plugin.nsxlib.load_balancer service_client = load_balancer.service vs_client = load_balancer.virtual_server app_client = load_balancer.application_profile binding = nsx_db.get_nsx_lbaas_listener_binding( context.session, lb_id, listener.id) if binding: vs_id = binding['lb_vs_id'] app_profile_id = binding['app_profile_id'] lb_binding = nsx_db.get_nsx_lbaas_loadbalancer_binding( context.session, lb_id) if lb_binding: try: lbs_id = lb_binding.get('lb_service_id') lb_service = service_client.get(lbs_id) vs_list = lb_service.get('virtual_server_ids') if vs_list and vs_id in vs_list: service_client.remove_virtual_server(lbs_id, vs_id) except nsxlib_exc.ManagerError: self.lbv2_driver.listener.failed_completion(context, listener) msg = (_('Failed to remove virtual server: %(listener)s ' 'from lb service %(lbs)s') % {'listener': listener.id, 'lbs': lbs_id}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) try: if listener.default_pool_id: vs_client.update(vs_id, pool_id='') # Update pool binding to disassociate virtual server pool_binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, lb_id, listener.default_pool_id) if pool_binding: nsx_db.update_nsx_lbaas_pool_binding( context.session, lb_id, listener.default_pool_id, None) vs_client.delete(vs_id) except nsx_exc.NsxResourceNotFound: msg = (_("virtual server not found on nsx: %(vs)s") % {'vs': vs_id}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) except nsxlib_exc.ManagerError: self.lbv2_driver.listener.failed_completion(context, listener) msg = 
(_('Failed to delete virtual server: %(listener)s') % {'listener': listener.id}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) try: app_client.delete(app_profile_id) except nsx_exc.NsxResourceNotFound: msg = (_("application profile not found on nsx: %s") % app_profile_id) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) except nsxlib_exc.ManagerError: self.lbv2_driver.listener.failed_completion(context, listener) msg = (_('Failed to delete application profile: %(app)s') % {'app': app_profile_id}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) # Delete imported NSX cert if there is any cert_tags = [{'scope': lb_const.LB_LISTENER_TYPE, 'tag': listener.id}] results = self.core_plugin.nsxlib.search_by_tags( tags=cert_tags) # Only delete object related to certificate used by listener for obj in results['results']: if obj.get('resource_type') in lb_const.LB_CERT_RESOURCE_TYPE: tm_client = self.core_plugin.nsxlib.trust_management try: tm_client.delete_cert(obj['id']) except nsxlib_exc.ManagerError: LOG.error("Exception thrown when trying to delete " "certificate: %(cert)s", {'cert': obj['id']}) nsx_db.delete_nsx_lbaas_listener_binding( context.session, lb_id, listener.id) self.lbv2_driver.listener.successful_completion( context, listener, delete=True) vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v3/pool_mgr.py0000666000175100017510000001652313244523345024452 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v3 import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgePoolManager(base_mgr.Nsxv3LoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self): super(EdgePoolManager, self).__init__() def _get_pool_kwargs(self, name=None, tags=None, algorithm=None, session_persistence=None): kwargs = {} if name: kwargs['display_name'] = name if tags: kwargs['tags'] = tags if algorithm: kwargs['algorithm'] = algorithm if session_persistence: kwargs['session_persistence'] = session_persistence kwargs['snat_translation'] = {'type': "LbSnatAutoMap"} return kwargs def _get_pool_tags(self, context, pool): return lb_utils.get_tags(self.core_plugin, pool.id, lb_const.LB_POOL_TYPE, pool.tenant_id, context.project_name) @log_helpers.log_method_call def create(self, context, pool): lb_id = pool.loadbalancer_id pool_client = self.core_plugin.nsxlib.load_balancer.pool vs_client = self.core_plugin.nsxlib.load_balancer.virtual_server pool_name = utils.get_name_and_uuid(pool.name or 'pool', pool.id) tags = self._get_pool_tags(context, pool) lb_algorithm = lb_const.LB_POOL_ALGORITHM_MAP.get(pool.lb_algorithm) try: kwargs = self._get_pool_kwargs(pool_name, tags, lb_algorithm) lb_pool = pool_client.create(**kwargs) nsx_db.add_nsx_lbaas_pool_binding( context.session, lb_id, pool.id, lb_pool['id']) except nsxlib_exc.ManagerError: self.lbv2_driver.pool.failed_completion(context, pool) msg = 
(_('Failed to create pool on NSX backend: %(pool)s') % {'pool': pool.id}) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) # The pool object can be created with either --listener or # --loadbalancer option. If listener is present, the virtual server # will be updated with the pool. Otherwise, just return. The binding # will be added later when the pool is associated with layer7 rule. if pool.listener: listener_id = pool.listener.id binding = nsx_db.get_nsx_lbaas_listener_binding( context.session, lb_id, listener_id) if binding: vs_id = binding['lb_vs_id'] try: vs_client.update(vs_id, pool_id=lb_pool['id']) except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): self.lbv2_driver.pool.failed_completion(context, pool) LOG.error('Failed to attach pool %s to virtual ' 'server %s', lb_pool['id'], vs_id) nsx_db.update_nsx_lbaas_pool_binding( context.session, lb_id, pool.id, vs_id) else: msg = (_("Couldn't find binding on the listener: %s") % listener_id) raise nsx_exc.NsxPluginException(err_msg=msg) self.lbv2_driver.pool.successful_completion(context, pool) @log_helpers.log_method_call def update(self, context, old_pool, new_pool): pool_client = self.core_plugin.nsxlib.load_balancer.pool pool_name = None tags = None lb_algorithm = None if new_pool.name != old_pool.name: pool_name = utils.get_name_and_uuid(new_pool.name or 'pool', new_pool.id) tags = self._get_pool_tags(context, new_pool) if new_pool.lb_algorithm != old_pool.lb_algorithm: lb_algorithm = lb_const.LB_POOL_ALGORITHM_MAP.get( new_pool.lb_algorithm) binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, old_pool.loadbalancer_id, old_pool.id) if not binding: msg = (_('Cannot find pool %(pool)s binding on NSX db ' 'mapping'), {'pool': old_pool.id}) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) try: lb_pool_id = binding['lb_pool_id'] kwargs = self._get_pool_kwargs(pool_name, tags, lb_algorithm) pool_client.update(lb_pool_id, **kwargs) 
self.lbv2_driver.pool.successful_completion(context, new_pool) except Exception as e: with excutils.save_and_reraise_exception(): self.lbv2_driver.pool.failed_completion(context, new_pool) LOG.error('Failed to update pool %(pool)s with ' 'error %(error)s', {'pool': old_pool.id, 'error': e}) @log_helpers.log_method_call def delete(self, context, pool): lb_id = pool.loadbalancer_id pool_client = self.core_plugin.nsxlib.load_balancer.pool vs_client = self.core_plugin.nsxlib.load_balancer.virtual_server binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, lb_id, pool.id) if binding: vs_id = binding.get('lb_vs_id') lb_pool_id = binding.get('lb_pool_id') if vs_id: try: vs_client.update(vs_id, pool_id='') except nsxlib_exc.ManagerError: self.lbv2_driver.pool.failed_completion(context, pool) msg = _('Failed to remove lb pool %(pool)s from virtual ' 'server %(vs)s') % {'pool': lb_pool_id, 'vs': vs_id} raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) try: pool_client.delete(lb_pool_id) except nsxlib_exc.ManagerError: self.lbv2_driver.pool.failed_completion(context, pool) msg = (_('Failed to delete lb pool from nsx: %(pool)s') % {'pool': lb_pool_id}) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) nsx_db.delete_nsx_lbaas_pool_binding(context.session, lb_id, pool.id) self.lbv2_driver.pool.successful_completion( context, pool, delete=True) vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v3/loadbalancer_mgr.py0000666000175100017510000002020613244523345026101 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import exceptions as n_exc from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v3 import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgeLoadBalancerManager(base_mgr.Nsxv3LoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self): super(EdgeLoadBalancerManager, self).__init__() registry.subscribe( self._handle_subnet_gw_change, resources.SUBNET, events.AFTER_UPDATE) @log_helpers.log_method_call def create(self, context, lb): if lb_utils.validate_lb_subnet(context, self.core_plugin, lb.vip_subnet_id): self.lbv2_driver.load_balancer.successful_completion(context, lb) else: msg = (_('Cannot create lb on subnet %(sub)s for ' 'loadbalancer %(lb)s. 
The subnet needs to connect a ' 'router which is already set gateway.') % {'sub': lb.vip_subnet_id, 'lb': lb.id}) raise n_exc.BadRequest(resource='lbaas-subnet', msg=msg) @log_helpers.log_method_call def update(self, context, old_lb, new_lb): vs_client = self.core_plugin.nsxlib.load_balancer.virtual_server app_client = self.core_plugin.nsxlib.load_balancer.application_profile if new_lb.name != old_lb.name: for listener in new_lb.listeners: binding = nsx_db.get_nsx_lbaas_listener_binding( context.session, new_lb.id, listener.id) if binding: vs_id = binding['lb_vs_id'] app_profile_id = binding['app_profile_id'] new_lb_name = new_lb.name[:utils.MAX_TAG_LEN] try: # Update tag on virtual server with new lb name vs = vs_client.get(vs_id) updated_tags = utils.update_v3_tags( vs['tags'], [{'scope': lb_const.LB_LB_NAME, 'tag': new_lb_name}]) vs_client.update(vs_id, tags=updated_tags) # Update tag on application profile with new lb name app_profile = app_client.get(app_profile_id) app_client.update( app_profile_id, tags=updated_tags, resource_type=app_profile['resource_type']) except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): self.lbv2_driver.pool.failed_completion(context, new_lb) LOG.error('Failed to update tag %(tag)s for lb ' '%(lb)s', {'tag': updated_tags, 'lb': new_lb.name}) self.lbv2_driver.load_balancer.successful_completion(context, new_lb) @log_helpers.log_method_call def delete(self, context, lb): service_client = self.core_plugin.nsxlib.load_balancer.service lb_binding = nsx_db.get_nsx_lbaas_loadbalancer_binding( context.session, lb.id) if lb_binding: lb_service_id = lb_binding['lb_service_id'] nsx_router_id = lb_binding['lb_router_id'] try: lb_service = service_client.get(lb_service_id) except nsxlib_exc.ManagerError: LOG.warning("LB service %(lbs)s is not found", {'lbs': lb_service_id}) else: vs_list = lb_service.get('virtual_server_ids') if not vs_list: try: service_client.delete(lb_service_id) # If there is no lb service attached to 
the router, # update the router advertise_lb_vip flag to false. router_client = self.core_plugin.nsxlib.logical_router router_client.update_advertisement( nsx_router_id, advertise_lb_vip=False) except nsxlib_exc.ManagerError: self.lbv2_driver.load_balancer.failed_completion( context, lb, delete=True) msg = (_('Failed to delete lb service %(lbs)s from nsx' ) % {'lbs': lb_service_id}) raise n_exc.BadRequest(resource='lbaas-lb', msg=msg) nsx_db.delete_nsx_lbaas_loadbalancer_binding( context.session, lb.id) self.lbv2_driver.load_balancer.successful_completion( context, lb, delete=True) @log_helpers.log_method_call def refresh(self, context, lb): # TODO(tongl): implememnt pass @log_helpers.log_method_call def stats(self, context, lb): # Since multiple LBaaS loadbalancer can share the same LB service, # get the corresponding virtual servers' stats instead of LB service. stats = {'active_connections': 0, 'bytes_in': 0, 'bytes_out': 0, 'total_connections': 0} service_client = self.core_plugin.nsxlib.load_balancer.service lb_binding = nsx_db.get_nsx_lbaas_loadbalancer_binding( context.session, lb.id) vs_list = self._get_lb_virtual_servers(context, lb) if lb_binding: lb_service_id = lb_binding.get('lb_service_id') try: rsp = service_client.get_stats(lb_service_id) if rsp: for vs in rsp['virtual_servers']: # Skip the virtual server that doesn't belong # to this loadbalancer if vs['virtual_server_id'] not in vs_list: continue vs_stats = vs['statistics'] for stat in lb_const.LB_STATS_MAP: lb_stat = lb_const.LB_STATS_MAP[stat] stats[stat] += vs_stats[lb_stat] except nsxlib_exc.ManagerError: msg = _('Failed to retrieve stats from LB service ' 'for loadbalancer %(lb)s') % {'lb': lb.id} raise n_exc.BadRequest(resource='lbaas-lb', msg=msg) return stats def _get_lb_virtual_servers(self, context, lb): # Get all virtual servers that belong to this loadbalancer vs_list = [] for listener in lb.listeners: vs_binding = nsx_db.get_nsx_lbaas_listener_binding( context.session, lb.id, 
listener.id) if vs_binding: vs_list.append(vs_binding.get('lb_vs_id')) return vs_list def _handle_subnet_gw_change(self, *args, **kwargs): # As the Edge appliance doesn't use DHCP, we should change the # default gateway here when the subnet GW changes. orig = kwargs['original_subnet'] updated = kwargs['subnet'] if orig['gateway_ip'] == updated['gateway_ip']: return vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v3/l7policy_mgr.py0000666000175100017510000001466613244523345025251 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import exceptions as n_exc from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v3 import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc LOG = logging.getLogger(__name__) class EdgeL7PolicyManager(base_mgr.Nsxv3LoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self): super(EdgeL7PolicyManager, self).__init__() @log_helpers.log_method_call def _update_policy_position(self, vs_id, rule_id, position): vs_client = self.core_plugin.nsxlib.load_balancer.virtual_server vs = vs_client.get(vs_id) lb_rules = vs.get('rule_ids', []) if rule_id in lb_rules: lb_rules.remove(rule_id) if len(lb_rules) < position: lb_rules.append(rule_id) else: lb_rules.insert(position - 1, rule_id) vs_client.update(vs_id, rule_ids=lb_rules) @log_helpers.log_method_call def create(self, context, policy): lb_id = policy.listener.loadbalancer_id listener_id = policy.listener_id rule_client = self.core_plugin.nsxlib.load_balancer.rule tags = lb_utils.get_tags(self.core_plugin, policy.id, lb_const.LB_L7POLICY_TYPE, policy.tenant_id, context.project_name) binding = nsx_db.get_nsx_lbaas_listener_binding( context.session, lb_id, listener_id) if not binding: self.lbv2_driver.l7policy.failed_completion(context, policy) msg = _('Cannot find nsx lbaas binding for listener ' '%(listener_id)s') % {'listener_id': listener_id} raise n_exc.BadRequest(resource='lbaas-l7policy-create', msg=msg) vs_id = binding['lb_vs_id'] rule_body = lb_utils.convert_l7policy_to_lb_rule(context, policy) try: lb_rule = rule_client.create(tags=tags, **rule_body) except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): self.lbv2_driver.l7policy.failed_completion(context, policy) LOG.error('Failed to create lb rule 
at NSX backend') try: self._update_policy_position(vs_id, lb_rule['id'], policy.position) except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): self.lbv2_driver.l7policy.failed_completion(context, policy) LOG.error('Failed to add rule %(rule)% to virtual server ' '%(vs)s at NSX backend', {'rule': lb_rule['id'], 'vs': vs_id}) nsx_db.add_nsx_lbaas_l7policy_binding( context.session, policy.id, lb_rule['id'], vs_id) self.lbv2_driver.l7policy.successful_completion(context, policy) @log_helpers.log_method_call def update(self, context, old_policy, new_policy): rule_client = self.core_plugin.nsxlib.load_balancer.rule binding = nsx_db.get_nsx_lbaas_l7policy_binding(context.session, old_policy.id) if not binding: self.lbv2_driver.l7rule.failed_completion(context, new_policy) msg = _('Cannot find nsx lbaas binding for policy ' '%(policy_id)s') % {'policy_id': old_policy.id} raise n_exc.BadRequest(resource='lbaas-l7policy-update', msg=msg) vs_id = binding['lb_vs_id'] lb_rule_id = binding['lb_rule_id'] rule_body = lb_utils.convert_l7policy_to_lb_rule(context, new_policy) try: rule_client.update(lb_rule_id, **rule_body) if new_policy.position != old_policy.position: self._update_policy_position(vs_id, lb_rule_id, new_policy.position) except Exception as e: with excutils.save_and_reraise_exception(): self.lbv2_driver.l7policy.failed_completion(context, new_policy) LOG.error('Failed to update L7policy %(policy)s: ' '%(err)s', {'policy': old_policy.id, 'err': e}) self.lbv2_driver.l7policy.successful_completion(context, new_policy) @log_helpers.log_method_call def delete(self, context, policy): vs_client = self.core_plugin.nsxlib.load_balancer.virtual_server rule_client = self.core_plugin.nsxlib.load_balancer.rule binding = nsx_db.get_nsx_lbaas_l7policy_binding(context.session, policy.id) if binding: vs_id = binding['lb_vs_id'] rule_id = binding['lb_rule_id'] try: # Update virtual server to remove lb rule vs_client.remove_rule(vs_id, rule_id) 
rule_client.delete(rule_id) except nsxlib_exc.ResourceNotFound: LOG.warning('LB rule %(rule)s is not found on NSX', {'rule': rule_id}) except nsxlib_exc.ManagerError: self.lbv2_driver.l7policy.failed_completion( context, policy) msg = (_('Failed to delete lb rule: %(rule)s') % {'rule': rule_id}) raise n_exc.BadRequest(resource='lbaas-l7policy-delete', msg=msg) nsx_db.delete_nsx_lbaas_l7policy_binding( context.session, policy.id) self.lbv2_driver.l7policy.successful_completion( context, policy, delete=True) vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v3/member_mgr.py0000666000175100017510000002705313244523345024750 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import exceptions as n_exc from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import locking from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v3 import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgeMemberManager(base_mgr.Nsxv3LoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self): super(EdgeMemberManager, self).__init__() @log_helpers.log_method_call def _get_info_from_fip(self, context, fip): filters = {'floating_ip_address': [fip]} floating_ips = self.core_plugin.get_floatingips(context, filters=filters) if floating_ips: return (floating_ips[0]['fixed_ip_address'], floating_ips[0]['router_id']) else: msg = (_('Cannot get floating ip %(fip)s provided from ' 'neutron db') % {'fip': fip}) raise n_exc.BadRequest(resource='lbaas-vip', msg=msg) @log_helpers.log_method_call def _create_lb_service(self, context, service_client, tenant_id, router_id, nsx_router_id, lb_id, lb_size): router = self.core_plugin.get_router(context, router_id) if not router.get('external_gateway_info'): msg = (_('Tenant router %(router)s does not connect to ' 'external gateway') % {'router': router['id']}) raise n_exc.BadRequest(resource='lbaas-lbservice-create', msg=msg) lb_name = utils.get_name_and_uuid(router['name'] or 'router', router_id) tags = lb_utils.get_tags(self.core_plugin, router_id, lb_const.LR_ROUTER_TYPE, tenant_id, context.project_name) attachment = {'target_id': nsx_router_id, 'target_type': 'LogicalRouter'} lb_service = service_client.create(display_name=lb_name, tags=tags, attachment=attachment, size=lb_size) # Update router to enable advertise_lb_vip flag 
self.core_plugin.nsxlib.logical_router.update_advertisement( nsx_router_id, advertise_lb_vip=True) return lb_service def _get_updated_pool_members(self, context, lb_pool, member): network = lb_utils.get_network_from_subnet( context, self.core_plugin, member.subnet_id) if network.get('router:external'): fixed_ip, router_id = self._get_info_from_fip( context, member.address) else: fixed_ip = member.address for m in lb_pool['members']: if m['ip_address'] == fixed_ip: m['display_name'] = member.name[:219] + '_' + member.id m['weight'] = member.weight return lb_pool['members'] @log_helpers.log_method_call def _add_loadbalancer_binding(self, context, lb_id, lbs_id, nsx_router_id, vip_address): # First check if there is already binding for the lb. # If there is no binding for the lb, add the db binding. binding = nsx_db.get_nsx_lbaas_loadbalancer_binding( context.session, lb_id) if not binding: nsx_db.add_nsx_lbaas_loadbalancer_binding( context.session, lb_id, lbs_id, nsx_router_id, vip_address) else: LOG.debug("LB binding has already been added, and no need " "to add here.") @log_helpers.log_method_call def create(self, context, member): with locking.LockManager.get_lock('member-%s' % str(member.pool.loadbalancer_id)): self._member_create(context, member) def _member_create(self, context, member): lb_id = member.pool.loadbalancer_id pool_id = member.pool.id loadbalancer = member.pool.loadbalancer if not lb_utils.validate_lb_subnet(context, self.core_plugin, member.subnet_id): msg = (_('Cannot add member %(member)s to pool as member subnet ' '%(subnet)s is neither public nor connected to router') % {'member': member.id, 'subnet': member.subnet_id}) raise n_exc.BadRequest(resource='lbaas-subnet', msg=msg) pool_client = self.core_plugin.nsxlib.load_balancer.pool service_client = self.core_plugin.nsxlib.load_balancer.service network = lb_utils.get_network_from_subnet( context, self.core_plugin, member.subnet_id) if network.get('router:external'): router_id, fixed_ip = 
self._get_info_from_fip( context, member.address) else: router_id = lb_utils.get_router_from_network( context, self.core_plugin, member.subnet_id) fixed_ip = member.address binding = nsx_db.get_nsx_lbaas_pool_binding(context.session, lb_id, pool_id) if binding: vs_id = binding.get('lb_vs_id') lb_pool_id = binding.get('lb_pool_id') lb_binding = nsx_db.get_nsx_lbaas_loadbalancer_binding( context.session, lb_id) if not lb_binding: nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) lb_service = service_client.get_router_lb_service( nsx_router_id) if not lb_service: lb_size = lb_utils.get_lb_flavor_size( self.flavor_plugin, context, loadbalancer.flavor_id) lb_service = self._create_lb_service( context, service_client, member.tenant_id, router_id, nsx_router_id, loadbalancer.id, lb_size) if lb_service: lb_service_id = lb_service['id'] self._add_loadbalancer_binding( context, loadbalancer.id, lb_service_id, nsx_router_id, loadbalancer.vip_address) if vs_id: try: service_client.add_virtual_server(lb_service_id, vs_id) except nsxlib_exc.ManagerError: self.lbv2_driver.member.failed_completion(context, member) msg = (_('Failed to attach virtual server %(vs)s ' 'to lb service %(service)s') % {'vs': vs_id, 'service': lb_service_id}) raise n_exc.BadRequest(resource='lbaas-member', msg=msg) else: msg = (_('Failed to get lb service to attach virtual ' 'server %(vs)s for member %(member)s') % {'vs': vs_id, 'member': member['id']}) raise nsx_exc.NsxPluginException(err_msg=msg) lb_pool = pool_client.get(lb_pool_id) old_m = lb_pool.get('members', None) new_m = [{'display_name': member.name[:219] + '_' + member.id, 'ip_address': fixed_ip, 'port': member.protocol_port, 'weight': member.weight}] members = (old_m + new_m) if old_m else new_m pool_client.update_pool_with_members(lb_pool_id, members) else: msg = (_('Failed to get pool binding to add member %s') % member['id']) raise nsx_exc.NsxPluginException(err_msg=msg) 
self.lbv2_driver.member.successful_completion(context, member) @log_helpers.log_method_call def update(self, context, old_member, new_member): lb_id = old_member.pool.loadbalancer_id pool_id = old_member.pool.id pool_client = self.core_plugin.nsxlib.load_balancer.pool pool_binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, lb_id, pool_id) if pool_binding: lb_pool_id = pool_binding.get('lb_pool_id') try: lb_pool = pool_client.get(lb_pool_id) updated_members = self._get_updated_pool_members( context, lb_pool, new_member) pool_client.update_pool_with_members(lb_pool_id, updated_members) except Exception as e: with excutils.save_and_reraise_exception(): self.lbv2_driver.member.failed_completion( context, new_member) LOG.error('Failed to update member %(member)s: ' '%(err)s', {'member': old_member.id, 'err': e}) self.lbv2_driver.member.successful_completion( context, new_member) @log_helpers.log_method_call def delete(self, context, member): lb_id = member.pool.loadbalancer_id pool_id = member.pool.id pool_client = self.core_plugin.nsxlib.load_balancer.pool pool_binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, lb_id, pool_id) if pool_binding: lb_pool_id = pool_binding.get('lb_pool_id') try: lb_pool = pool_client.get(lb_pool_id) network = lb_utils.get_network_from_subnet( context, self.core_plugin, member.subnet_id) if network.get('router:external'): fixed_ip, router_id = self._get_info_from_fip( context, member.address) else: fixed_ip = member.address if 'members' in lb_pool: m_list = lb_pool['members'] members = [m for m in m_list if m['ip_address'] != fixed_ip] pool_client.update_pool_with_members(lb_pool_id, members) except nsxlib_exc.ManagerError: self.lbv2_driver.member.failed_completion(context, member) msg = _('Failed to remove member from pool on NSX backend') raise n_exc.BadRequest(resource='lbaas-member', msg=msg) self.lbv2_driver.member.successful_completion( context, member, delete=True) 
vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v3/__init__.py0000666000175100017510000000000013244523345024352 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v3/healthmonitor_mgr.py0000666000175100017510000001364513244523345026360 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v3 import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgeHealthMonitorManager(base_mgr.Nsxv3LoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self): super(EdgeHealthMonitorManager, self).__init__() @log_helpers.log_method_call def _build_monitor_args(self, hm): if hm.type in lb_const.NSXV3_MONITOR_MAP: monitor_type = lb_const.NSXV3_MONITOR_MAP.get(hm.type) else: msg = (_('Cannot create health monitor %(monitor)s with ' 'type %(type)s') % {'monitor': hm.id, 'type': hm.type}) raise n_exc.InvalidInput(error_message=msg) body = {'resource_type': monitor_type, 'interval': hm.delay, 'fall_count': hm.max_retries, 'timeout': hm.timeout} if 
monitor_type in [lb_const.LB_HEALTH_MONITOR_HTTP, lb_const.LB_HEALTH_MONITOR_HTTPS]: if hm.http_method: body['request_method'] = hm.http_method if hm.url_path: body['request_url'] = hm.url_path # TODO(tongl): nsxv3 backend doesn't support granular control # of expected_codes. So we ignore it and use default for now. # Once backend supports it, we can add it back. # if hm.expected_codes: # body['response_status'] = hm.expected_codes return body @log_helpers.log_method_call def create(self, context, hm): lb_id = hm.pool.loadbalancer_id pool_id = hm.pool.id pool_client = self.core_plugin.nsxlib.load_balancer.pool monitor_client = self.core_plugin.nsxlib.load_balancer.monitor monitor_name = utils.get_name_and_uuid(hm.name or 'monitor', hm.id) tags = lb_utils.get_tags(self.core_plugin, hm.id, lb_const.LB_HM_TYPE, hm.tenant_id, context.project_name) monitor_body = self._build_monitor_args(hm) try: lb_monitor = monitor_client.create( display_name=monitor_name, tags=tags, **monitor_body) except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): self.lbv2_driver.health_monitor.failed_completion(context, hm) binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, lb_id, pool_id) if binding: lb_pool_id = binding['lb_pool_id'] try: pool_client.add_monitor_to_pool(lb_pool_id, lb_monitor['id']) except nsxlib_exc.ManagerError: self.lbv2_driver.health_monitor.failed_completion( context, hm) msg = _('Failed to attach monitor %(monitor)s to pool ' '%(pool)s') % {'monitor': lb_monitor['id'], 'pool': lb_pool_id} raise n_exc.BadRequest(resource='lbaas-hm', msg=msg) nsx_db.add_nsx_lbaas_monitor_binding( context.session, lb_id, pool_id, hm.id, lb_monitor['id'], lb_pool_id) self.lbv2_driver.health_monitor.successful_completion(context, hm) @log_helpers.log_method_call def update(self, context, old_hm, new_hm): self.lbv2_driver.health_monitor.successful_completion(context, new_hm) @log_helpers.log_method_call def delete(self, context, hm): lb_id = 
hm.pool.loadbalancer_id pool_id = hm.pool.id pool_client = self.core_plugin.nsxlib.load_balancer.pool monitor_client = self.core_plugin.nsxlib.load_balancer.monitor binding = nsx_db.get_nsx_lbaas_monitor_binding( context.session, lb_id, pool_id, hm.id) if binding: lb_monitor_id = binding['lb_monitor_id'] lb_pool_id = binding['lb_pool_id'] try: pool_client.remove_monitor_from_pool(lb_pool_id, lb_monitor_id) except nsxlib_exc.ManagerError as exc: LOG.error('Failed to remove monitor %(monitor)s from pool ' '%(pool)s with exception from nsx %(exc)s)', {'monitor': lb_monitor_id, 'pool': lb_pool_id, 'exc': exc}) try: monitor_client.delete(lb_monitor_id) except nsxlib_exc.ManagerError as exc: LOG.error('Failed to delete monitor %(monitor)s from ' 'backend with exception %(exc)s', {'monitor': lb_monitor_id, 'exc': exc}) nsx_db.delete_nsx_lbaas_monitor_binding(context.session, lb_id, pool_id, hm.id) self.lbv2_driver.health_monitor.successful_completion( context, hm, delete=True) vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v3/lb_utils.py0000666000175100017510000001640113244523345024444 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.db import l3_db from neutron.services.flavors import flavors_plugin from neutron_lib import exceptions as n_exc from vmware_nsx._i18n import _ from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import lb_const from vmware_nsxlib.v3 import utils def get_tags(plugin, resource_id, resource_type, project_id, project_name): resource = {'project_id': project_id, 'id': resource_id} tags = plugin.nsxlib.build_v3_tags_payload( resource, resource_type=resource_type, project_name=project_name) return tags def get_network_from_subnet(context, plugin, subnet_id): subnet = plugin.get_subnet(context, subnet_id) if subnet: return plugin.get_network(context, subnet['network_id']) def get_router_from_network(context, plugin, subnet_id): subnet = plugin.get_subnet(context, subnet_id) network_id = subnet['network_id'] port_filters = {'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF], 'network_id': [network_id]} ports = plugin.get_ports(context, filters=port_filters) if ports: router = plugin.get_router(context, ports[0]['device_id']) if router.get('external_gateway_info'): return router['id'] def get_lb_router_id(context, plugin, lb): router_client = plugin.nsxlib.logical_router name = utils.get_name_and_uuid(lb.name or 'router', lb.id) tags = get_tags(plugin, lb.id, lb_const.LB_LB_TYPE, lb.tenant_id, context.project_name) edge_cluster_uuid = plugin._get_edge_cluster(plugin._default_tier0_router) lb_router = router_client.create(name, tags, edge_cluster_uuid) return lb_router def get_lb_flavor_size(flavor_plugin, context, flavor_id): if not flavor_id: return lb_const.DEFAULT_LB_SIZE else: flavor = flavors_plugin.FlavorsPlugin.get_flavor( flavor_plugin, context, flavor_id) flavor_size = flavor['name'] if flavor_size in lb_const.LB_FLAVOR_SIZES: return flavor_size.upper() else: err_msg = (_("Invalid flavor size %(flavor)s, only 'small', " "'medium', or 'large' are supported") % {'flavor': flavor_size}) raise n_exc.InvalidInput(error_message=err_msg) def 
validate_lb_subnet(context, plugin, subnet_id): '''Validate LB subnet before creating loadbalancer on it. To create a loadbalancer, the network has to be either an external network or private network that connects to a tenant router. The tenant router needs to connect to gateway. It will throw exception if the network doesn't meet this requirement. :param context: context :param plugin: core plugin :param subnet_id: loadbalancer's subnet id :return: True if subnet meet requirement, otherwise return False ''' network = get_network_from_subnet(context, plugin, subnet_id) valid_router = get_router_from_network( context, plugin, subnet_id) if network.get('router:external') or valid_router: return True else: return False def get_rule_match_conditions(policy): match_conditions = [] # values in rule have already been validated in LBaaS API, # we won't need to valid anymore in driver, and just get # the LB rule mapping from the dict. for rule in policy.rules: match_type = lb_const.LB_RULE_MATCH_TYPE[rule.compare_type] if rule.type == lb_const.L7_RULE_TYPE_COOKIE: header_value = rule.key + '=' + rule.value match_conditions.append( {'type': 'LbHttpRequestHeaderCondition', 'match_type': match_type, 'header_name': 'Cookie', 'header_value': header_value}) elif rule.type == lb_const.L7_RULE_TYPE_FILE_TYPE: match_conditions.append( {'type': 'LbHttpRequestUriCondition', 'match_type': match_type, 'uri': '*.' 
+ rule.value}) elif rule.type == lb_const.L7_RULE_TYPE_HEADER: match_conditions.append( {'type': 'LbHttpRequestHeaderCondition', 'match_type': match_type, 'header_name': rule.key, 'header_value': rule.value}) elif rule.type == lb_const.L7_RULE_TYPE_HOST_NAME: match_conditions.append( {'type': 'LbHttpRequestHeaderCondition', 'match_type': match_type, 'header_name': 'Host', 'header_value': rule.value}) elif rule.type == lb_const.L7_RULE_TYPE_PATH: match_conditions.append( {'type': 'LbHttpRequestUriCondition', 'match_type': match_type, 'uri': rule.value}) else: msg = (_('l7rule type %(type)s is not supported in LBaaS') % {'type': rule.type}) raise n_exc.BadRequest(resource='lbaas-l7rule', msg=msg) return match_conditions def get_rule_actions(context, l7policy): lb_id = l7policy.listener.loadbalancer_id if l7policy.action == lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL: pool_binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, lb_id, l7policy.redirect_pool_id) if pool_binding: lb_pool_id = pool_binding['lb_pool_id'] actions = [{'type': lb_const.LB_SELECT_POOL_ACTION, 'pool_id': lb_pool_id}] else: msg = _('Failed to get LB pool binding from nsx db') raise n_exc.BadRequest(resource='lbaas-l7rule-create', msg=msg) elif l7policy.action == lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL: actions = [{'type': lb_const.LB_HTTP_REDIRECT_ACTION, 'redirect_status': lb_const.LB_HTTP_REDIRECT_STATUS, 'redirect_url': l7policy.redirect_url}] elif l7policy.action == lb_const.L7_POLICY_ACTION_REJECT: actions = [{'type': lb_const.LB_REJECT_ACTION, 'reply_status': lb_const.LB_HTTP_REJECT_STATUS}] else: msg = (_('Invalid l7policy action: %(action)s') % {'action': l7policy.action}) raise n_exc.BadRequest(resource='lbaas-l7rule-create', msg=msg) return actions def convert_l7policy_to_lb_rule(context, policy): return { 'match_conditions': get_rule_match_conditions(policy), 'actions': get_rule_actions(context, policy), 'phase': lb_const.LB_RULE_HTTP_FORWARDING, 'match_strategy': 'ALL' } def 
remove_rule_from_policy(rule): l7rules = rule.policy.rules rule.policy.rules = [r for r in l7rules if r.id != rule.id] vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v3/lb_driver_v2.py0000666000175100017510000001000513244523413025174 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.callbacks import events from neutron_lib.callbacks import exceptions as nc_exc from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from oslo_log import helpers as log_helpers from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas.nsx_v3 import healthmonitor_mgr as hm_mgr from vmware_nsx.services.lbaas.nsx_v3 import l7policy_mgr from vmware_nsx.services.lbaas.nsx_v3 import l7rule_mgr from vmware_nsx.services.lbaas.nsx_v3 import listener_mgr from vmware_nsx.services.lbaas.nsx_v3 import loadbalancer_mgr as lb_mgr from vmware_nsx.services.lbaas.nsx_v3 import member_mgr from vmware_nsx.services.lbaas.nsx_v3 import pool_mgr LOG = logging.getLogger(__name__) class NotImplementedManager(object): """Helper class to make any subclass of LoadBalancerBaseDriver explode if it is missing any of the required object managers. 
""" def create(self, context, obj): raise NotImplementedError() def update(self, context, old_obj, obj): raise NotImplementedError() def delete(self, context, obj): raise NotImplementedError() class EdgeLoadbalancerDriverV2(object): @log_helpers.log_method_call def __init__(self): super(EdgeLoadbalancerDriverV2, self).__init__() self.loadbalancer = lb_mgr.EdgeLoadBalancerManager() self.listener = listener_mgr.EdgeListenerManager() self.pool = pool_mgr.EdgePoolManager() self.member = member_mgr.EdgeMemberManager() self.healthmonitor = hm_mgr.EdgeHealthMonitorManager() self.l7policy = l7policy_mgr.EdgeL7PolicyManager() self.l7rule = l7rule_mgr.EdgeL7RuleManager() self._subscribe_router_delete_callback() def _subscribe_router_delete_callback(self): # Check if there is any LB attachment for the NSX router. # This callback is subscribed here to prevent router deletion # if it still has LB service attached to it. registry.subscribe(self._check_lb_service_on_router, resources.ROUTER, events.BEFORE_DELETE) def _unsubscribe_router_delete_callback(self): registry.unsubscribe(self._check_lb_service_on_router, resources.ROUTER, events.BEFORE_DELETE) def _check_lb_service_on_router(self, resource, event, trigger, **kwargs): """Check if there is any lb service on nsx router""" nsx_router_id = nsx_db.get_nsx_router_id(kwargs['context'].session, kwargs['router_id']) nsxlib = self.loadbalancer.core_plugin.nsxlib service_client = nsxlib.load_balancer.service lb_service = service_client.get_router_lb_service(nsx_router_id) if lb_service: msg = _('Cannot delete router as it still has lb service ' 'attachment') raise nc_exc.CallbackFailure(msg) class DummyLoadbalancerDriverV2(object): @log_helpers.log_method_call def __init__(self): self.load_balancer = NotImplementedManager() self.listener = NotImplementedManager() self.pool = NotImplementedManager() self.member = NotImplementedManager() self.health_monitor = NotImplementedManager() self.l7policy = NotImplementedManager() self.l7rule 
= NotImplementedManager() vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v3/l7rule_mgr.py0000666000175100017510000000543113244523345024707 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas.nsx_v3 import lb_utils LOG = logging.getLogger(__name__) class EdgeL7RuleManager(base_mgr.Nsxv3LoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self): super(EdgeL7RuleManager, self).__init__() def _update_l7rule_change(self, context, rule, delete=False): rule_client = self.core_plugin.nsxlib.load_balancer.rule binding = nsx_db.get_nsx_lbaas_l7policy_binding(context.session, rule.policy.id) if not binding: self.lbv2_driver.l7rule.failed_completion(context, rule) msg = _('Cannot find nsx lbaas binding for policy ' '%(policy_id)s') % {'policy_id': rule.policy.id} raise n_exc.BadRequest(resource='lbaas-l7policy-update', msg=msg) lb_rule_id = binding['lb_rule_id'] if delete: lb_utils.remove_rule_from_policy(rule) rule_body = lb_utils.convert_l7policy_to_lb_rule(context, rule.policy) try: rule_client.update(lb_rule_id, **rule_body) except Exception as e: with excutils.save_and_reraise_exception(): 
self.lbv2_driver.l7rule.failed_completion(context, rule) LOG.error('Failed to update L7policy %(policy)s: ' '%(err)s', {'policy': rule.policy.id, 'err': e}) self.lbv2_driver.l7rule.successful_completion(context, rule, delete=delete) @log_helpers.log_method_call def create(self, context, rule): self._update_l7rule_change(context, rule) @log_helpers.log_method_call def update(self, context, old_rule, new_rule): self._update_l7rule_change(context, new_rule) @log_helpers.log_method_call def delete(self, context, rule): self._update_l7rule_change(context, rule, delete=True) vmware-nsx-12.0.1/vmware_nsx/services/lbaas/__init__.py0000666000175100017510000000000013244523345023132 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/lbaas/lb_const.py0000666000175100017510000001031113244523345023204 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN' LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS' LB_METHOD_SOURCE_IP = 'SOURCE_IP' BALANCE_MAP = { LB_METHOD_ROUND_ROBIN: 'round-robin', LB_METHOD_LEAST_CONNECTIONS: 'leastconn', LB_METHOD_SOURCE_IP: 'ip-hash'} LB_PROTOCOL_TCP = 'TCP' LB_PROTOCOL_HTTP = 'HTTP' LB_PROTOCOL_HTTPS = 'HTTPS' LB_PROTOCOL_TERMINATED_HTTPS = 'TERMINATED_HTTPS' PROTOCOL_MAP = { LB_PROTOCOL_TCP: 'tcp', LB_PROTOCOL_HTTP: 'http', LB_PROTOCOL_HTTPS: 'https', LB_PROTOCOL_TERMINATED_HTTPS: 'https'} LB_HEALTH_MONITOR_PING = 'PING' LB_HEALTH_MONITOR_TCP = 'TCP' LB_HEALTH_MONITOR_HTTP = 'HTTP' LB_HEALTH_MONITOR_HTTPS = 'HTTPS' HEALTH_MONITOR_MAP = { LB_HEALTH_MONITOR_PING: 'icmp', LB_HEALTH_MONITOR_TCP: 'tcp', LB_HEALTH_MONITOR_HTTP: 'http', LB_HEALTH_MONITOR_HTTPS: 'tcp'} LB_SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP' LB_SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE' LB_SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE' SESSION_PERSISTENCE_METHOD_MAP = { LB_SESSION_PERSISTENCE_SOURCE_IP: 'sourceip', LB_SESSION_PERSISTENCE_APP_COOKIE: 'cookie', LB_SESSION_PERSISTENCE_HTTP_COOKIE: 'cookie'} SESSION_PERSISTENCE_COOKIE_MAP = { LB_SESSION_PERSISTENCE_APP_COOKIE: 'app', LB_SESSION_PERSISTENCE_HTTP_COOKIE: 'insert'} L7_POLICY_ACTION_REJECT = 'REJECT' L7_POLICY_ACTION_REDIRECT_TO_POOL = 'REDIRECT_TO_POOL' L7_POLICY_ACTION_REDIRECT_TO_URL = 'REDIRECT_TO_URL' L7_RULE_TYPE_HOST_NAME = 'HOST_NAME' L7_RULE_TYPE_PATH = 'PATH' L7_RULE_TYPE_FILE_TYPE = 'FILE_TYPE' L7_RULE_TYPE_HEADER = 'HEADER' L7_RULE_TYPE_COOKIE = 'COOKIE' L7_RULE_COMPARE_TYPE_REGEX = 'REGEX' L7_RULE_COMPARE_TYPE_STARTS_WITH = 'STARTS_WITH' L7_RULE_COMPARE_TYPE_ENDS_WITH = 'ENDS_WITH' L7_RULE_COMPARE_TYPE_CONTAINS = 'CONTAINS' L7_RULE_COMPARE_TYPE_EQUAL_TO = 'EQUAL_TO' # Resource type for resources created on NSX backend LB_LB_TYPE = 'os-lbaas-lb-id' LB_LB_NAME = 'os-lbaas-lb-name' LB_LISTENER_TYPE = 'os-lbaas-listener-id' LB_HM_TYPE = 'os-lbaas-hm-id' LB_POOL_TYPE = 'os-lbaas-pool-id' LB_L7POLICY_TYPE = 
'os-lbaas-l7policy-id' LB_HTTP_PROFILE = 'LbHttpProfile' LB_TCP_PROFILE = 'LbFastTcpProfile' LB_UDP_PROFILE = 'LbFastUdpProfile' NSXV3_MONITOR_MAP = {LB_HEALTH_MONITOR_PING: 'LbIcmpMonitor', LB_HEALTH_MONITOR_TCP: 'LbTcpMonitor', LB_HEALTH_MONITOR_HTTP: 'LbHttpMonitor', LB_HEALTH_MONITOR_HTTPS: 'LbHttpsMonitor'} LB_POOL_ALGORITHM_MAP = { LB_METHOD_ROUND_ROBIN: 'WEIGHTED_ROUND_ROBIN', LB_METHOD_LEAST_CONNECTIONS: 'LEAST_CONNECTION', LB_METHOD_SOURCE_IP: 'IP_HASH', } LB_STATS_MAP = {'active_connections': 'current_sessions', 'bytes_in': 'bytes_in', 'bytes_out': 'bytes_out', 'total_connections': 'total_sessions'} LR_ROUTER_TYPE = 'os-neutron-router-id' LR_PORT_TYPE = 'os-neutron-rport-id' LB_CERT_RESOURCE_TYPE = ['certificate_signed', 'certificate_self_signed'] DEFAULT_LB_SIZE = 'SMALL' LB_FLAVOR_SIZES = ['SMALL', 'MEDIUM', 'LARGE', 'small', 'medium', 'large'] LB_RULE_MATCH_TYPE = { L7_RULE_COMPARE_TYPE_CONTAINS: 'CONTAINS', L7_RULE_COMPARE_TYPE_ENDS_WITH: 'ENDS_WITH', L7_RULE_COMPARE_TYPE_EQUAL_TO: 'EQUALS', L7_RULE_COMPARE_TYPE_REGEX: 'REGEX', L7_RULE_COMPARE_TYPE_STARTS_WITH: 'STARTS_WITH'} LB_SELECT_POOL_ACTION = 'LbSelectPoolAction' LB_HTTP_REDIRECT_ACTION = 'LbHttpRedirectAction' LB_REJECT_ACTION = 'LbHttpRejectAction' LB_HTTP_REDIRECT_STATUS = '302' LB_HTTP_REJECT_STATUS = '403' LB_RULE_HTTP_REQUEST_REWRITE = 'HTTP_REQUEST_REWRITE' LB_RULE_HTTP_FORWARDING = 'HTTP_FORWARDING' LB_RULE_HTTP_RESPONSE_REWRITE = 'HTTP_RESPONSE_REWRITE' vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx/0000775000175100017510000000000013244524600021634 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx/__init__.py0000666000175100017510000000000013244523345023742 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx/lb_driver_v2.py0000666000175100017510000002124513244523345024600 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import helpers as log_helpers from oslo_log import log as logging from neutron_lib import exceptions as n_exc from vmware_nsx.services.lbaas import base_mgr LOG = logging.getLogger(__name__) class EdgeLoadbalancerDriverV2(object): @log_helpers.log_method_call def __init__(self): super(EdgeLoadbalancerDriverV2, self).__init__() self.loadbalancer = EdgeLoadBalancerManager() self.listener = EdgeListenerManager() self.pool = EdgePoolManager() self.member = EdgeMemberManager() self.healthmonitor = EdgeHealthMonitorManager() self.l7policy = EdgeL7PolicyManager() self.l7rule = EdgeL7RuleManager() class EdgeLoadBalancerManager(base_mgr.LoadbalancerBaseManager): @log_helpers.log_method_call def create(self, context, lb): # verify that the subnet belongs to the same plugin as the lb lb_p = self.core_plugin._get_plugin_from_project(context, lb.tenant_id) subnet_p = self.core_plugin._get_subnet_plugin_by_id( context, lb.vip_subnet_id) if lb_p.plugin_type() != subnet_p.plugin_type(): self.lbv2_driver.load_balancer.failed_completion(context, lb) msg = (_('Subnet must belong to the plugin %s, as the ' 'loadbalancer.') % lb_p.plugin_type()) raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) return lb_p.lbv2_driver.loadbalancer.create(context, lb) @log_helpers.log_method_call def update(self, context, old_lb, new_lb): p = self.core_plugin._get_plugin_from_project(context, new_lb.tenant_id) return 
p.lbv2_driver.loadbalancer.update(context, old_lb, new_lb) @log_helpers.log_method_call def delete(self, context, lb): p = self.core_plugin._get_plugin_from_project(context, lb.tenant_id) return p.lbv2_driver.loadbalancer.delete(context, lb) @log_helpers.log_method_call def refresh(self, context, lb): p = self.core_plugin._get_plugin_from_project(context, lb.tenant_id) return p.lbv2_driver.loadbalancer.refresh(context, lb) @log_helpers.log_method_call def stats(self, context, lb): p = self.core_plugin._get_plugin_from_project(context, lb.tenant_id) return p.lbv2_driver.loadbalancer.stats(context, lb) class EdgeListenerManager(base_mgr.LoadbalancerBaseManager): @log_helpers.log_method_call def create(self, context, listener, certificate=None): p = self.core_plugin._get_plugin_from_project(context, listener.tenant_id) return p.lbv2_driver.listener.create(context, listener, certificate=certificate) @log_helpers.log_method_call def update(self, context, old_listener, new_listener, certificate=None): p = self.core_plugin._get_plugin_from_project(context, new_listener.tenant_id) return p.lbv2_driver.listener.update(context, old_listener, new_listener, certificate=certificate) @log_helpers.log_method_call def delete(self, context, listener): p = self.core_plugin._get_plugin_from_project(context, listener.tenant_id) return p.lbv2_driver.listener.delete(context, listener) class EdgePoolManager(base_mgr.LoadbalancerBaseManager): @log_helpers.log_method_call def create(self, context, pool): p = self.core_plugin._get_plugin_from_project(context, pool.tenant_id) return p.lbv2_driver.pool.create(context, pool) @log_helpers.log_method_call def update(self, context, old_pool, new_pool): p = self.core_plugin._get_plugin_from_project(context, new_pool.tenant_id) return p.lbv2_driver.pool.update(context, old_pool, new_pool) @log_helpers.log_method_call def delete(self, context, pool): p = self.core_plugin._get_plugin_from_project(context, pool.tenant_id) return 
p.lbv2_driver.pool.delete(context, pool) class EdgeMemberManager(base_mgr.LoadbalancerBaseManager): @log_helpers.log_method_call def create(self, context, member): p = self.core_plugin._get_plugin_from_project(context, member.tenant_id) return p.lbv2_driver.member.create(context, member) @log_helpers.log_method_call def update(self, context, old_member, new_member): p = self.core_plugin._get_plugin_from_project(context, new_member.tenant_id) return p.lbv2_driver.member.update(context, old_member, new_member) @log_helpers.log_method_call def delete(self, context, member): p = self.core_plugin._get_plugin_from_project(context, member.tenant_id) return p.lbv2_driver.member.delete(context, member) class EdgeHealthMonitorManager(base_mgr.LoadbalancerBaseManager): @log_helpers.log_method_call def create(self, context, hm): p = self.core_plugin._get_plugin_from_project(context, hm.tenant_id) return p.lbv2_driver.healthmonitor.create(context, hm) @log_helpers.log_method_call def update(self, context, old_hm, new_hm): p = self.core_plugin._get_plugin_from_project(context, new_hm.tenant_id) return p.lbv2_driver.healthmonitor.update(context, old_hm, new_hm) @log_helpers.log_method_call def delete(self, context, hm): p = self.core_plugin._get_plugin_from_project(context, hm.tenant_id) return p.lbv2_driver.healthmonitor.delete(context, hm) class EdgeL7PolicyManager(base_mgr.LoadbalancerBaseManager): @log_helpers.log_method_call def create(self, context, policy): p = self.core_plugin._get_plugin_from_project(context, policy.tenant_id) return p.lbv2_driver.l7policy.create(context, policy) @log_helpers.log_method_call def update(self, context, old_policy, new_policy): p = self.core_plugin._get_plugin_from_project(context, new_policy.tenant_id) return p.lbv2_driver.l7policy.update(context, old_policy, new_policy) @log_helpers.log_method_call def delete(self, context, policy): p = self.core_plugin._get_plugin_from_project(context, policy.tenant_id) return 
p.lbv2_driver.l7policy.delete(context, policy) class EdgeL7RuleManager(base_mgr.LoadbalancerBaseManager): @log_helpers.log_method_call def create(self, context, rule): p = self.core_plugin._get_plugin_from_project(context, rule.tenant_id) return p.lbv2_driver.l7rule.create(context, rule) @log_helpers.log_method_call def update(self, context, old_rule, new_rule): p = self.core_plugin._get_plugin_from_project(context, new_rule.tenant_id) return p.lbv2_driver.l7rule.update(context, old_rule, new_rule) @log_helpers.log_method_call def delete(self, context, rule): p = self.core_plugin._get_plugin_from_project(context, rule.tenant_id) return p.lbv2_driver.l7rule.delete(context, rule) vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx/plugin.py0000666000175100017510000000217413244523345023517 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lbaas.services.loadbalancer import plugin from vmware_nsx.plugins.nsx import utils as tvd_utils @tvd_utils.filter_plugins class LoadBalancerTVPluginV2(plugin.LoadBalancerPluginv2): """NSX-TV plugin for LBaaS V2. This plugin adds separation between T/V instances """ methods_to_separate = ['get_loadbalancers', 'get_listeners', 'get_pools', 'get_healthmonitors', 'get_l7policies'] vmware-nsx-12.0.1/vmware_nsx/services/lbaas/base_mgr.py0000666000175100017510000000603113244523345023164 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import directory from oslo_log import log as logging from vmware_nsx.extensions import projectpluginmap LOG = logging.getLogger(__name__) class LoadbalancerBaseManager(object): _lbv2_driver = None _core_plugin = None _flavor_plugin = None def __init__(self): super(LoadbalancerBaseManager, self).__init__() def _get_plugin(self, plugin_type): return directory.get_plugin(plugin_type) @property def lbv2_driver(self): if not self._lbv2_driver: plugin = self._get_plugin( plugin_const.LOADBALANCERV2) self._lbv2_driver = ( plugin.drivers['vmwareedge']) return self._lbv2_driver @property def core_plugin(self): if not self._core_plugin: self._core_plugin = ( self._get_plugin(plugin_const.CORE)) return self._core_plugin @property def flavor_plugin(self): if not self._flavor_plugin: self._flavor_plugin = ( self._get_plugin(plugin_const.FLAVORS)) return self._flavor_plugin class EdgeLoadbalancerBaseManager(LoadbalancerBaseManager): def __init__(self, vcns_driver): super(EdgeLoadbalancerBaseManager, self).__init__() self.vcns_driver = vcns_driver @property def vcns(self): return self.vcns_driver.vcns @property def core_plugin(self): if not self._core_plugin: self._core_plugin = ( self._get_plugin(plugin_const.CORE)) if self._core_plugin.is_tvd_plugin(): # get the plugin that match this driver self._core_plugin = 
self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) return self._core_plugin class Nsxv3LoadbalancerBaseManager(LoadbalancerBaseManager): def __init__(self): super(Nsxv3LoadbalancerBaseManager, self).__init__() @property def core_plugin(self): if not self._core_plugin: self._core_plugin = ( self._get_plugin(plugin_const.CORE)) if self._core_plugin.is_tvd_plugin(): # get the plugin that match this driver self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_T) return self._core_plugin vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v/0000775000175100017510000000000013244524600022161 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v/lbaas_common.py0000666000175100017510000002551713244523345025206 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from oslo_log import log as logging from neutron_lib import constants from neutron_lib import exceptions as n_exc from vmware_nsx._i18n import _ from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield import edge_utils LOG = logging.getLogger(__name__) MEMBER_ID_PFX = 'member-' RESOURCE_ID_PFX = 'lbaas-' def get_member_id(member_id): return MEMBER_ID_PFX + member_id def get_lb_resource_id(lb_id): return (RESOURCE_ID_PFX + lb_id)[:36] def get_lb_edge_name(context, lb_id): """Look for the resource name of the edge hosting the LB. For older loadbalancers this may be a router edge """ binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) if binding: edge_binding = nsxv_db.get_nsxv_router_binding_by_edge( context.session, binding['edge_id']) if edge_binding: return edge_binding['router_id'] # fallback return get_lb_resource_id(lb_id) def get_lb_interface(context, plugin, lb_id, subnet_id): filters = {'fixed_ips': {'subnet_id': [subnet_id]}, 'device_id': [lb_id], 'device_owner': [constants.DEVICE_OWNER_NEUTRON_PREFIX + 'LB']} lb_ports = plugin.get_ports(context.elevated(), filters=filters) return lb_ports def create_lb_interface(context, plugin, lb_id, subnet_id, tenant_id, vip_addr=None, subnet=None): if not subnet: subnet = plugin.get_subnet(context, subnet_id) network_id = subnet.get('network_id') port_dict = {'name': 'lb_if-' + lb_id, 'admin_state_up': True, 'network_id': network_id, 'tenant_id': tenant_id, 'fixed_ips': [{'subnet_id': subnet['id']}], 'device_owner': constants.DEVICE_OWNER_NEUTRON_PREFIX + 'LB', 'device_id': lb_id, 'mac_address': constants.ATTR_NOT_SPECIFIED } port = plugin.base_create_port(context, {'port': port_dict}) ip_addr = port['fixed_ips'][0]['ip_address'] net = netaddr.IPNetwork(subnet['cidr']) resource_id = get_lb_edge_name(context, lb_id) address_groups = [{'primaryAddress': ip_addr, 'subnetPrefixLength': str(net.prefixlen), 'subnetMask': 
str(net.netmask)}] if vip_addr: address_groups[0]['secondaryAddresses'] = { 'type': 'secondary_addresses', 'ipAddress': [vip_addr]} edge_utils.update_internal_interface( plugin.nsx_v, context, resource_id, network_id, address_groups) def delete_lb_interface(context, plugin, lb_id, subnet_id): resource_id = get_lb_edge_name(context, lb_id) subnet = plugin.get_subnet(context, subnet_id) network_id = subnet.get('network_id') lb_ports = get_lb_interface(context, plugin, lb_id, subnet_id) for lb_port in lb_ports: plugin.delete_port(context, lb_port['id']) edge_utils.delete_interface(plugin.nsx_v, context, resource_id, network_id, dist=False) def get_lbaas_edge_id(context, plugin, lb_id, vip_addr, subnet_id, tenant_id, appliance_size): subnet = plugin.get_subnet(context, subnet_id) network_id = subnet.get('network_id') availability_zone = plugin.get_network_az_by_net_id(context, network_id) resource_id = get_lb_resource_id(lb_id) edge_id = plugin.edge_manager.allocate_lb_edge_appliance( context, resource_id, availability_zone=availability_zone, appliance_size=appliance_size) create_lb_interface(context, plugin, lb_id, subnet_id, tenant_id, vip_addr=vip_addr, subnet=subnet) gw_ip = subnet.get('gateway_ip') if gw_ip or subnet['host_routes']: routes = [{'cidr': r['destination'], 'nexthop': r['nexthop']} for r in subnet['host_routes']] plugin.nsx_v.update_routes(edge_id, gw_ip, routes) return edge_id def find_address_in_same_subnet(ip_addr, address_groups): """ Lookup an address group with a matching subnet to ip_addr. If found, return address_group. """ for address_group in address_groups['addressGroups']: net_addr = '%(primaryAddress)s/%(subnetPrefixLength)s' % address_group if netaddr.IPAddress(ip_addr) in netaddr.IPNetwork(net_addr): return address_group def add_address_to_address_groups(ip_addr, address_groups): """ Add ip_addr as a secondary IP address to an address group which belongs to the same subnet. 
""" address_group = find_address_in_same_subnet( ip_addr, address_groups) if address_group: sec_addr = address_group.get('secondaryAddresses') if not sec_addr: sec_addr = { 'type': 'secondary_addresses', 'ipAddress': [ip_addr]} else: sec_addr['ipAddress'].append(ip_addr) address_group['secondaryAddresses'] = sec_addr return True return False def del_address_from_address_groups(ip_addr, address_groups): """ Delete ip_addr from secondary address list in address groups. """ address_group = find_address_in_same_subnet(ip_addr, address_groups) if address_group: sec_addr = address_group.get('secondaryAddresses') if sec_addr and ip_addr in sec_addr['ipAddress']: sec_addr['ipAddress'].remove(ip_addr) return True return False def vip_as_secondary_ip(vcns, edge_id, vip, handler): with locking.LockManager.get_lock(edge_id): r = vcns.get_interfaces(edge_id)[1] vnics = r.get('vnics', []) for vnic in vnics: if vnic['type'] == 'trunk': for sub_interface in vnic.get('subInterfaces', {}).get( 'subInterfaces', []): address_groups = sub_interface.get('addressGroups') if handler(vip, address_groups): vcns.update_interface(edge_id, vnic) return True else: address_groups = vnic.get('addressGroups') if handler(vip, address_groups): vcns.update_interface(edge_id, vnic) return True return False def add_vip_as_secondary_ip(vcns, edge_id, vip): """ Edge appliance requires that a VIP will be configured as a primary or a secondary IP address on an interface. To do so, we locate an interface which is connected to the same subnet that vip belongs to. This can be a regular interface, on a sub-interface on a trunk. """ if not vip_as_secondary_ip(vcns, edge_id, vip, add_address_to_address_groups): msg = _('Failed to add VIP %(vip)s as secondary IP on ' 'Edge %(edge_id)s') % {'vip': vip, 'edge_id': edge_id} raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) def del_vip_as_secondary_ip(vcns, edge_id, vip): """ While removing vip, delete the secondary interface from Edge config. 
""" if not vip_as_secondary_ip(vcns, edge_id, vip, del_address_from_address_groups): msg = _('Failed to delete VIP %(vip)s as secondary IP on ' 'Edge %(edge_id)s') % {'vip': vip, 'edge_id': edge_id} raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) def extract_resource_id(location_uri): """ Edge assigns an ID for each resource that is being created: it is postfixes the uri specified in the Location header. This ID should be used while updating/deleting this resource. """ uri_elements = location_uri.split('/') return uri_elements[-1] def set_lb_firewall_default_rule(vcns, edge_id, action): with locking.LockManager.get_lock(edge_id): vcns.update_firewall_default_policy(edge_id, {'action': action}) def add_vip_fw_rule(vcns, edge_id, vip_id, ip_address): fw_rule = { 'firewallRules': [ {'action': 'accept', 'destination': { 'ipAddress': [ip_address]}, 'enabled': True, 'name': vip_id}]} with locking.LockManager.get_lock(edge_id): h = vcns.add_firewall_rule(edge_id, fw_rule)[0] fw_rule_id = extract_resource_id(h['location']) return fw_rule_id def del_vip_fw_rule(vcns, edge_id, vip_fw_rule_id): with locking.LockManager.get_lock(edge_id): vcns.delete_firewall_rule(edge_id, vip_fw_rule_id) def get_edge_ip_addresses(vcns, edge_id): edge_ips = [] r = vcns.get_interfaces(edge_id)[1] vnics = r.get('vnics', []) for vnic in vnics: if vnic['type'] == 'trunk': for sub_interface in vnic.get('subInterfaces', {}).get( 'subInterfaces', []): address_groups = sub_interface.get('addressGroups') for address_group in address_groups['addressGroups']: edge_ips.append(address_group['primaryAddress']) else: address_groups = vnic.get('addressGroups') for address_group in address_groups['addressGroups']: edge_ips.append(address_group['primaryAddress']) return edge_ips def enable_edge_acceleration(vcns, edge_id): with locking.LockManager.get_lock(edge_id): # Query the existing load balancer config in case metadata lb is set _, config = vcns.get_loadbalancer_config(edge_id) 
config['accelerationEnabled'] = True config['enabled'] = True config['featureType'] = 'loadbalancer_4.0' vcns.enable_service_loadbalancer(edge_id, config) def is_lb_on_router_edge(context, core_plugin, edge_id): binding = nsxv_db.get_nsxv_router_binding_by_edge( context.session, edge_id) router_id = binding['router_id'] if router_id.startswith(RESOURCE_ID_PFX): # New lbaas edge return False # verify that this is a router (and an exclusive one) try: router = core_plugin.get_router(context, router_id) if router.get('router_type') == 'exclusive': return True except Exception: pass LOG.error("Edge %(edge)s router %(rtr)s is not an lbaas edge, but also " "not an exclusive router", {'edge': edge_id, 'rtr': router_id}) return False vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v/v2/0000775000175100017510000000000013244524600022510 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v/v2/healthmon_mgr.py0000666000175100017510000001772213244523345025726 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from neutron_lib import exceptions as n_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxv_exc from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common LOG = logging.getLogger(__name__) class EdgeHealthMonitorManager(base_mgr.EdgeLoadbalancerBaseManager): @log_helpers.log_method_call def _convert_lbaas_monitor(self, hm): """ Transform OpenStack health monitor dict to NSXv health monitor dict. """ mon = { 'type': lb_const.HEALTH_MONITOR_MAP.get(hm.type, 'icmp'), 'interval': hm.delay, 'timeout': hm.timeout, 'maxRetries': hm.max_retries, 'name': hm.id} if hm.http_method: mon['method'] = hm.http_method if hm.url_path: mon['url'] = hm.url_path return mon @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgeHealthMonitorManager, self).__init__(vcns_driver) @log_helpers.log_method_call def create(self, context, hm): lb_id = hm.pool.loadbalancer_id lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) edge_id = lb_binding['edge_id'] pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, hm.pool.id) if not pool_binding: self.lbv2_driver.health_monitor.failed_completion( context, hm) msg = _('Failed to create health monitor on edge: %s. 
' 'Binding not found') % edge_id LOG.error(msg) raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) edge_pool_id = pool_binding['edge_pool_id'] hm_binding = nsxv_db.get_nsxv_lbaas_monitor_binding( context.session, lb_id, hm.pool.id, hm.id, edge_id) edge_mon_id = None if hm_binding: edge_mon_id = hm_binding['edge_mon_id'] else: edge_monitor = self._convert_lbaas_monitor(hm) try: with locking.LockManager.get_lock(edge_id): h = self.vcns.create_health_monitor(edge_id, edge_monitor)[0] edge_mon_id = lb_common.extract_resource_id(h['location']) nsxv_db.add_nsxv_lbaas_monitor_binding( context.session, lb_id, hm.pool.id, hm.id, edge_id, edge_mon_id) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.health_monitor.failed_completion( context, hm) LOG.error('Failed to create health monitor on edge: %s', edge_id) try: # Associate monitor with Edge pool with locking.LockManager.get_lock(edge_id): edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1] if edge_pool.get('monitorId'): edge_pool['monitorId'].append(edge_mon_id) else: edge_pool['monitorId'] = [edge_mon_id] self.vcns.update_pool(edge_id, edge_pool_id, edge_pool) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.health_monitor.failed_completion(context, hm) LOG.error( 'Failed to create health monitor on edge: %s', edge_id) self.lbv2_driver.health_monitor.successful_completion(context, hm) @log_helpers.log_method_call def update(self, context, old_hm, new_hm): lb_id = new_hm.pool.loadbalancer_id lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) edge_id = lb_binding['edge_id'] hm_binding = nsxv_db.get_nsxv_lbaas_monitor_binding( context.session, lb_id, new_hm.pool.id, new_hm.id, edge_id) edge_monitor = self._convert_lbaas_monitor(new_hm) try: with locking.LockManager.get_lock(edge_id): self.vcns.update_health_monitor(edge_id, hm_binding['edge_mon_id'], edge_monitor) except 
nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.health_monitor.failed_completion(context, new_hm) LOG.error('Failed to update monitor on edge: %s', edge_id) self.lbv2_driver.health_monitor.successful_completion(context, new_hm) @log_helpers.log_method_call def delete(self, context, hm): pool_id = hm.pool.id lb_id = hm.pool.loadbalancer_id lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) edge_id = lb_binding['edge_id'] pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, pool_id) if not pool_binding: nsxv_db.del_nsxv_lbaas_monitor_binding( context.session, lb_id, pool_id, hm.id, edge_id) self.lbv2_driver.health_monitor.successful_completion( context, hm, delete=True) return edge_pool_id = pool_binding['edge_pool_id'] hm_binding = nsxv_db.get_nsxv_lbaas_monitor_binding( context.session, lb_id, pool_id, hm.id, edge_id) edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1] if hm_binding['edge_mon_id'] in edge_pool['monitorId']: edge_pool['monitorId'].remove(hm_binding['edge_mon_id']) try: with locking.LockManager.get_lock(edge_id): self.vcns.update_pool(edge_id, edge_pool_id, edge_pool) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.health_monitor.failed_completion(context, hm) LOG.error('Failed to delete monitor mapping on edge: %s', edge_id) # If this monitor is not used on this edge anymore, delete it if not edge_pool['monitorId']: try: with locking.LockManager.get_lock(edge_id): self.vcns.delete_health_monitor(hm_binding['edge_id'], hm_binding['edge_mon_id']) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.health_monitor.failed_completion(context, hm) LOG.error('Failed to delete monitor on edge: %s', edge_id) nsxv_db.del_nsxv_lbaas_monitor_binding( context.session, lb_id, pool_id, hm.id, edge_id) self.lbv2_driver.health_monitor.successful_completion( context, hm, 
delete=True) vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v/v2/listener_mgr.py0000666000175100017510000003107513244523345025571 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vcns_exc from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common LOG = logging.getLogger(__name__) def listener_to_edge_app_profile(listener, edge_cert_id): edge_app_profile = { 'insertXForwardedFor': False, 'name': listener.id, 'serverSslEnabled': False, 'sslPassthrough': False, 'template': lb_const.PROTOCOL_MAP[listener.protocol], } if (listener.protocol == lb_const.LB_PROTOCOL_HTTPS or listener.protocol == lb_const.LB_PROTOCOL_TERMINATED_HTTPS): if edge_cert_id: edge_app_profile['clientSsl'] = { 'caCertificate': [], 'clientAuth': 'ignore', 'crlCertificate': [], 'serviceCertificate': [edge_cert_id]} else: edge_app_profile['sslPassthrough'] = True if listener.default_pool: if listener.default_pool.session_persistence: pool_sess_persist = listener.default_pool.session_persistence 
sess_persist_type = pool_sess_persist.type persistence = { 'method': lb_const.SESSION_PERSISTENCE_METHOD_MAP.get( sess_persist_type)} if (sess_persist_type in lb_const.SESSION_PERSISTENCE_COOKIE_MAP): cookie_name = getattr(pool_sess_persist, 'cookie_name', None) if cookie_name is None: cookie_name = 'default_cookie_name' persistence.update({ 'cookieName': cookie_name, 'cookieMode': lb_const.SESSION_PERSISTENCE_COOKIE_MAP[ sess_persist_type]}) edge_app_profile['persistence'] = persistence return edge_app_profile def listener_to_edge_vse(context, listener, vip_address, default_pool, app_profile_id): if listener.connection_limit: connection_limit = max(0, listener.connection_limit) else: connection_limit = 0 vse = { 'name': 'vip_' + listener.id, 'description': listener.description, 'ipAddress': vip_address, 'protocol': lb_const.PROTOCOL_MAP[listener.protocol], 'port': listener.protocol_port, 'connectionLimit': connection_limit, 'defaultPoolId': default_pool, 'accelerationEnabled': ( listener.protocol == lb_const.LB_PROTOCOL_TCP), 'applicationProfileId': app_profile_id} # Add the L7 policies if listener.l7_policies: app_rule_ids = [] for pol in listener.l7_policies: binding = nsxv_db.get_nsxv_lbaas_l7policy_binding( context.session, pol.id) if binding: app_rule_ids.append(binding['edge_app_rule_id']) vse['applicationRuleId'] = app_rule_ids return vse def update_app_profile(vcns, context, listener, edge_id, edge_cert_id=None): lb_id = listener.loadbalancer_id listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding( context.session, lb_id, listener.id) app_profile_id = listener_binding['app_profile_id'] app_profile = listener_to_edge_app_profile(listener, edge_cert_id) with locking.LockManager.get_lock(edge_id): vcns.update_app_profile( edge_id, app_profile_id, app_profile) return app_profile_id class EdgeListenerManager(base_mgr.EdgeLoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgeListenerManager, 
self).__init__(vcns_driver) def _upload_certificate(self, context, edge_id, cert_id, certificate): cert_binding = nsxv_db.get_nsxv_lbaas_certificate_binding( context.session, cert_id, edge_id) if cert_binding: return cert_binding['edge_cert_id'] request = { 'pemEncoding': certificate.get_certificate(), 'privateKey': certificate.get_private_key()} passphrase = certificate.get_private_key_passphrase() if passphrase: request['passphrase'] = passphrase cert_obj = self.vcns.upload_edge_certificate(edge_id, request)[1] cert_list = cert_obj.get('certificates', {}) if cert_list: edge_cert_id = cert_list[0]['objectId'] else: error = _("Failed to upload a certificate to edge %s") % edge_id raise nsxv_exc.NsxPluginException(err_msg=error) nsxv_db.add_nsxv_lbaas_certificate_binding( context.session, cert_id, edge_id, edge_cert_id) return edge_cert_id @log_helpers.log_method_call def create(self, context, listener, certificate=None): default_pool = None lb_id = listener.loadbalancer_id lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) edge_id = lb_binding['edge_id'] if listener.default_pool and listener.default_pool.id: pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, listener.default_pool.id) if pool_binding: default_pool = pool_binding['edge_pool_id'] edge_cert_id = None if certificate: try: edge_cert_id = self._upload_certificate( context, edge_id, listener.default_tls_container_id, certificate) except Exception: with excutils.save_and_reraise_exception(): self.lbv2_driver.listener.failed_completion(context, listener) app_profile = listener_to_edge_app_profile(listener, edge_cert_id) app_profile_id = None try: with locking.LockManager.get_lock(edge_id): h = (self.vcns.create_app_profile(edge_id, app_profile))[0] app_profile_id = lb_common.extract_resource_id(h['location']) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.listener.failed_completion(context, listener) 
LOG.error('Failed to create app profile on edge: %s', lb_binding['edge_id']) vse = listener_to_edge_vse(context, listener, lb_binding['vip_address'], default_pool, app_profile_id) try: with locking.LockManager.get_lock(edge_id): h = self.vcns.create_vip(edge_id, vse)[0] edge_vse_id = lb_common.extract_resource_id(h['location']) nsxv_db.add_nsxv_lbaas_listener_binding(context.session, lb_id, listener.id, app_profile_id, edge_vse_id) self.lbv2_driver.listener.successful_completion(context, listener) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.listener.failed_completion(context, listener) LOG.error('Failed to create vip on Edge: %s', edge_id) self.vcns.delete_app_profile(edge_id, app_profile_id) @log_helpers.log_method_call def update(self, context, old_listener, new_listener, certificate=None): default_pool = None if new_listener.default_pool and new_listener.default_pool.id: pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, new_listener.loadbalancer_id, new_listener.default_pool.id) if pool_binding: default_pool = pool_binding['edge_pool_id'] else: LOG.error("Couldn't find pool binding for pool %s", new_listener.default_pool.id) lb_id = new_listener.loadbalancer_id listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding( context.session, lb_id, new_listener.id) lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) edge_id = lb_binding['edge_id'] edge_cert_id = None if certificate: if (old_listener.default_tls_container_id != new_listener.default_tls_container_id): try: edge_cert_id = self._upload_certificate( context, edge_id, new_listener.default_tls_container_id, certificate) except Exception: with excutils.save_and_reraise_exception(): self.lbv2_driver.listener.failed_completion( context, new_listener) else: cert_binding = nsxv_db.get_nsxv_lbaas_certificate_binding( context.session, new_listener.default_tls_container_id, edge_id) edge_cert_id = 
cert_binding['edge_cert_id'] try: app_profile_id = update_app_profile( self.vcns, context, new_listener, edge_id, edge_cert_id=edge_cert_id) vse = listener_to_edge_vse(context, new_listener, lb_binding['vip_address'], default_pool, app_profile_id) with locking.LockManager.get_lock(edge_id): self.vcns.update_vip(edge_id, listener_binding['vse_id'], vse) self.lbv2_driver.listener.successful_completion(context, new_listener) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.listener.failed_completion(context, new_listener) LOG.error('Failed to update app profile on edge: %s', edge_id) @log_helpers.log_method_call def delete(self, context, listener): lb_id = listener.loadbalancer_id listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding( context.session, lb_id, listener.id) lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) if lb_binding and listener_binding: edge_id = lb_binding['edge_id'] edge_vse_id = listener_binding['vse_id'] app_profile_id = listener_binding['app_profile_id'] try: with locking.LockManager.get_lock(edge_id): self.vcns.delete_vip(edge_id, edge_vse_id) except vcns_exc.ResourceNotFound: LOG.error('vip not found on edge: %s', edge_id) except vcns_exc.VcnsApiException: LOG.error('Failed to delete vip on edge: %s', edge_id) try: with locking.LockManager.get_lock(edge_id): self.vcns.delete_app_profile(edge_id, app_profile_id) except vcns_exc.ResourceNotFound: LOG.error('app profile not found on edge: %s', edge_id) except vcns_exc.VcnsApiException: LOG.error('Failed to delete app profile on Edge: %s', edge_id) nsxv_db.del_nsxv_lbaas_listener_binding(context.session, lb_id, listener.id) self.lbv2_driver.listener.successful_completion( context, listener, delete=True) vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v/v2/pool_mgr.py0000666000175100017510000002030713244523345024711 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from neutron_lib import exceptions as n_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxv_exc from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common from vmware_nsx.services.lbaas.nsx_v.v2 import listener_mgr LOG = logging.getLogger(__name__) class EdgePoolManager(base_mgr.EdgeLoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgePoolManager, self).__init__(vcns_driver) @log_helpers.log_method_call def create(self, context, pool): edge_pool = { 'name': 'pool_' + pool.id, 'description': getattr(pool, 'description', getattr(pool, 'name')), 'algorithm': lb_const.BALANCE_MAP.get(pool.lb_algorithm, 'round-robin'), 'transparent': False } lb_id = pool.loadbalancer_id lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) if not lb_binding: msg = _( 'No suitable Edge found for pool %s') % pool.id raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) edge_id = lb_binding['edge_id'] try: with locking.LockManager.get_lock(edge_id): h = self.vcns.create_pool(edge_id, edge_pool)[0] edge_pool_id = lb_common.extract_resource_id(h['location']) 
nsxv_db.add_nsxv_lbaas_pool_binding(context.session, lb_id, pool.id, edge_pool_id) if pool.listener: listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding( context.session, lb_id, pool.listener.id) # Associate listener with pool vse = listener_mgr.listener_to_edge_vse( context, pool.listener, lb_binding['vip_address'], edge_pool_id, listener_binding['app_profile_id']) with locking.LockManager.get_lock(edge_id): self.vcns.update_vip(edge_id, listener_binding['vse_id'], vse) # This action also set this pool as the default pool of the # listener, so the application profile may need to be updated if pool.session_persistence: listener_mgr.update_app_profile( self.vcns, context, pool.listener, edge_id) self.lbv2_driver.pool.successful_completion(context, pool) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.pool.failed_completion(context, pool) LOG.error('Failed to create pool %s', pool.id) @log_helpers.log_method_call def update(self, context, old_pool, new_pool): edge_pool = { 'name': 'pool_' + new_pool.id, 'description': getattr(new_pool, 'description', getattr(new_pool, 'name')), 'algorithm': lb_const.BALANCE_MAP.get( new_pool.lb_algorithm, 'round-robin'), 'transparent': False } if new_pool.listener: listener = new_pool.listener lb_id = listener.loadbalancer_id else: lb_id = new_pool.loadbalancer_id lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, new_pool.id) edge_id = lb_binding['edge_id'] edge_pool_id = pool_binding['edge_pool_id'] try: with locking.LockManager.get_lock(edge_id): # get the configured monitor-id org_edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1] monitor_id = org_edge_pool.get('monitorId') if monitor_id: edge_pool['monitorId'] = monitor_id # Keep the current members if org_edge_pool.get('member'): edge_pool['member'] = org_edge_pool['member'] self.vcns.update_pool(edge_id, 
edge_pool_id, edge_pool) self.lbv2_driver.pool.successful_completion(context, new_pool) # if the session_persistence was changed, # we may need to update the listener application profile if new_pool.listener: old_sess_persist = old_pool.session_persistence new_sess_persist = new_pool.session_persistence if new_sess_persist != old_sess_persist: listener_mgr.update_app_profile( self.vcns, context, new_pool.listener, edge_id) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.pool.failed_completion(context, new_pool) LOG.error('Failed to update pool %s', new_pool.id) @log_helpers.log_method_call def delete(self, context, pool): lb_id = pool.loadbalancer_id lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, pool.id) edge_id = lb_binding['edge_id'] if not pool_binding: self.lbv2_driver.pool.successful_completion( context, pool, delete=True) return edge_pool_id = pool_binding['edge_pool_id'] listeners_to_update = [] try: if pool.listeners: for listener in pool.listeners: # the pool session persistence may affect the associated # pool application profile if (pool.session_persistence and listener.default_pool and listener.default_pool.id == pool.id): listeners_to_update.append(listener) listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding( context.session, lb_id, listener.id) vse = listener_mgr.listener_to_edge_vse( context, listener, lb_binding['vip_address'], None, listener_binding['app_profile_id']) with locking.LockManager.get_lock(edge_id): self.vcns.update_vip( edge_id, listener_binding['vse_id'], vse) self.vcns.delete_pool(edge_id, edge_pool_id) self.lbv2_driver.pool.successful_completion( context, pool, delete=True) nsxv_db.del_nsxv_lbaas_pool_binding( context.session, lb_id, pool.id) for listener in listeners_to_update: # need to update the listeners too, now with no default pool listener.default_pool = None 
listener_mgr.update_app_profile( self.vcns, context, listener, edge_id) except nsxv_exc.VcnsApiException: self.lbv2_driver.pool.failed_completion(context, pool) LOG.error('Failed to delete pool %s', pool.id) vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v/v2/loadbalancer_mgr.py0000666000175100017510000002261113244523345026347 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.services.flavors import flavors_plugin from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import exceptions as n_exc from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxv_exc from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common LOG = logging.getLogger(__name__) class EdgeLoadBalancerManager(base_mgr.EdgeLoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgeLoadBalancerManager, self).__init__(vcns_driver) registry.subscribe( self._handle_subnet_gw_change, resources.SUBNET, events.AFTER_UPDATE) def 
_get_lb_flavor_size(self, context, flavor_id): if not flavor_id: return vcns_const.SERVICE_SIZE_MAPPING['lb'] else: flavor = flavors_plugin.FlavorsPlugin.get_flavor( self.flavor_plugin, context, flavor_id) flavor_size = flavor['name'] if flavor_size.lower() in vcns_const.ALLOWED_EDGE_SIZES: return flavor_size.lower() else: err_msg = (_("Invalid flavor size %(flavor)s, only %(sizes)s " "are supported") % {'flavor': flavor_size, 'sizes': vcns_const.ALLOWED_EDGE_SIZES}) raise n_exc.InvalidInput(error_message=err_msg) @log_helpers.log_method_call def create(self, context, lb): lb_size = self._get_lb_flavor_size(context, lb.flavor_id) edge_id = lb_common.get_lbaas_edge_id( context, self.core_plugin, lb.id, lb.vip_address, lb.vip_subnet_id, lb.tenant_id, lb_size) if not edge_id: msg = _('Failed to allocate Edge on subnet %(sub)s for ' 'loadbalancer %(lb)s') % {'sub': lb.vip_subnet_id, 'lb': lb.id} raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) try: lb_common.enable_edge_acceleration(self.vcns, edge_id) edge_fw_rule_id = lb_common.add_vip_fw_rule( self.vcns, edge_id, lb.id, lb.vip_address) # set LB default rule lb_common.set_lb_firewall_default_rule(self.vcns, edge_id, 'accept') nsxv_db.add_nsxv_lbaas_loadbalancer_binding( context.session, lb.id, edge_id, edge_fw_rule_id, lb.vip_address) self.lbv2_driver.load_balancer.successful_completion(context, lb) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.load_balancer.failed_completion(context, lb) LOG.error('Failed to create pool %s', lb.id) @log_helpers.log_method_call def update(self, context, old_lb, new_lb): self.lbv2_driver.load_balancer.successful_completion(context, new_lb) @log_helpers.log_method_call def delete(self, context, lb): # Discard any ports which are associated with LB filters = { 'device_id': [lb.id], 'device_owner': [constants.DEVICE_OWNER_NEUTRON_PREFIX + 'LB']} lb_ports = self.core_plugin.get_ports(context.elevated(), filters=filters) for lb_port in 
lb_ports: self.core_plugin.delete_port(context.elevated(), lb_port['id']) binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb.id) if binding: edge_binding = nsxv_db.get_nsxv_router_binding_by_edge( context.session, binding['edge_id']) # set LB default rule lb_common.set_lb_firewall_default_rule( self.vcns, binding['edge_id'], 'deny') if edge_binding: old_lb = lb_common.is_lb_on_router_edge( context, self.core_plugin, binding['edge_id']) if not old_lb: resource_id = lb_common.get_lb_resource_id(lb.id) self.core_plugin.edge_manager.delete_lrouter( context, resource_id, dist=False) else: # Edge was created on an exclusive router with the old code try: lb_common.del_vip_fw_rule( self.vcns, binding['edge_id'], binding['edge_fw_rule_id']) except nsxv_exc.VcnsApiException as e: LOG.error('Failed to delete loadbalancer %(lb)s ' 'FW rule. exception is %(exc)s', {'lb': lb.id, 'exc': e}) try: lb_common.del_vip_as_secondary_ip(self.vcns, binding['edge_id'], lb.vip_address) except Exception as e: LOG.error('Failed to delete loadbalancer %(lb)s ' 'interface IP. 
exception is %(exc)s', {'lb': lb.id, 'exc': e}) nsxv_db.del_nsxv_lbaas_loadbalancer_binding(context.session, lb.id) self.lbv2_driver.load_balancer.successful_completion(context, lb, delete=True) @log_helpers.log_method_call def refresh(self, context, lb): # TODO(kobis): implememnt pass @log_helpers.log_method_call def stats(self, context, lb): stats = {'bytes_in': 0, 'bytes_out': 0, 'active_connections': 0, 'total_connections': 0} binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding(context.session, lb.id) try: lb_stats = self.vcns.get_loadbalancer_statistics( binding['edge_id']) except nsxv_exc.VcnsApiException: msg = (_('Failed to read load balancer statistics, edge: %s') % binding['edge_id']) raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) pools_stats = lb_stats[1].get('pool', []) for pool_stats in pools_stats: stats['bytes_in'] += pool_stats.get('bytesIn', 0) stats['bytes_out'] += pool_stats.get('bytesOut', 0) stats['active_connections'] += pool_stats.get('curSessions', 0) stats['total_connections'] += pool_stats.get('totalSessions', 0) return stats def _handle_subnet_gw_change(self, *args, **kwargs): # As the Edge appliance doesn't use DHCP, we should change the # default gateway here when the subnet GW changes. 
context = kwargs.get('context') orig = kwargs['original_subnet'] updated = kwargs['subnet'] if (orig['gateway_ip'] == updated['gateway_ip'] and self._routes_equal(orig['host_routes'], updated['host_routes'])): return subnet_id = updated['id'] subnet = self.core_plugin.get_subnet(context.elevated(), subnet_id) filters = {'fixed_ips': {'subnet_id': [subnet_id]}, 'device_owner': [constants.DEVICE_OWNER_LOADBALANCERV2]} lb_ports = self.core_plugin.get_ports(context.elevated(), filters=filters) if lb_ports: for lb_port in lb_ports: if lb_port['device_id']: edge_bind = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_port['device_id']) edge_id = edge_bind['edge_id'] routes = [{'cidr': r['destination'], 'nexthop': r['nexthop']} for r in subnet['host_routes']] self.core_plugin.nsx_v.update_routes( edge_id, subnet['gateway_ip'], routes) def _routes_equal(self, a, b): if len(a) != len(b): return False for a_item in a: found = False for b_item in b: # compare values as keysets should be same if set(a_item.values()) == set(b_item.values()): found = True if not found: return False return True vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v/v2/l7policy_mgr.py0000666000175100017510000003345313244523345025510 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from neutron_lib import constants from neutron_lib import exceptions as n_exc from vmware_nsx._i18n import _ from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common LOG = logging.getLogger(__name__) type_by_compare_type = { lb_const.L7_RULE_COMPARE_TYPE_EQUAL_TO: '', lb_const.L7_RULE_COMPARE_TYPE_REGEX: '_reg', lb_const.L7_RULE_COMPARE_TYPE_STARTS_WITH: '_beg', lb_const.L7_RULE_COMPARE_TYPE_ENDS_WITH: '_end', lb_const.L7_RULE_COMPARE_TYPE_CONTAINS: '_sub' } def policy_to_application_rule(policy): condition = '' rule_lines = [] for rule in policy.rules: if rule.provisioning_status == constants.PENDING_DELETE: # skip this rule as it is being deleted continue type_by_comp = type_by_compare_type.get(rule.compare_type) if type_by_comp is None: type_by_comp = '' LOG.warnning('Unsupported compare type %(type)s is used in ' 'policy %(id)s', {'type': rule.compare_type, 'id': policy.id}) if rule.type == lb_const.L7_RULE_TYPE_COOKIE: # Example: acl hdr_sub(cookie) SEEN=1 hdr_type = 'hdr' + type_by_comp rule_line = ('acl %(rule_id)s %(hdr_type)s(cookie) ' '%(key)s=%(val)s' % {'rule_id': rule.id, 'hdr_type': hdr_type, 'key': rule.key, 'val': rule.value}) elif rule.type == lb_const.L7_RULE_TYPE_HEADER: # Example: acl hdr(user-agent) -i test hdr_type = 'hdr' + type_by_comp rule_line = ('acl %(rule_id)s %(hdr_type)s(%(key)s) ' '-i %(val)s' % {'rule_id': rule.id, 'hdr_type': hdr_type, 'key': rule.key, 'val': rule.value}) elif rule.type == lb_const.L7_RULE_TYPE_HOST_NAME: # Example: acl hdr_beg(host) -i abcd hdr_type = 'hdr' + type_by_comp # -i for case insensitive host name rule_line = ('acl %(rule_id)s %(hdr_type)s(host) ' '-i %(val)s' % {'rule_id': rule.id, 'hdr_type': hdr_type, 'val': 
rule.value}) elif rule.type == lb_const.L7_RULE_TYPE_PATH: # Example: acl path_beg -i /images # -i for case insensitive path path_type = 'path' + type_by_comp rule_line = ('acl %(rule_id)s %(path_type)s ' '-i %(val)s' % {'rule_id': rule.id, 'path_type': path_type, 'val': rule.value}) elif rule.type == lb_const.L7_RULE_TYPE_FILE_TYPE: # Example: acl path_sub -i .jpg # Regardless of the compare type, always check contained in path. # -i for case insensitive file type val = rule.value if not val.startswith('.'): val = '.' + val rule_line = ('acl %(rule_id)s path_sub ' '-i %(val)s' % {'rule_id': rule.id, 'val': val}) else: msg = _('Unsupported L7rule type %s') % rule.type raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) rule_lines.append(rule_line) invert_sign = '!' if rule.invert else '' condition = condition + invert_sign + rule.id + ' ' if rule_lines: # concatenate all the rules with new lines all_rules = '\n'.join(rule_lines + ['']) # remove he last space from the condition condition = condition[:-1] else: all_rules = '' condition = 'TRUE' # prepare the action if policy.action == lb_const.L7_POLICY_ACTION_REJECT: # return HTTP 403 response action = 'http-request deny' elif policy.action == lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL: action = 'use_backend pool_%s' % policy.redirect_pool_id elif policy.action == lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL: action = 'redirect location %s' % policy.redirect_url else: msg = _('Unsupported L7policy action %s') % policy.action raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) # Build the final script script = all_rules + '%(action)s if %(cond)s' % { 'action': action, 'cond': condition} app_rule = {'name': 'pol_' + policy.id, 'script': script} return app_rule def policy_to_edge_and_rule_id(context, policy_id): # get the nsx application rule id and edge id binding = nsxv_db.get_nsxv_lbaas_l7policy_binding( context.session, policy_id) if not binding: msg = _('No suitable Edge found for policy %s') % policy_id raise 
n_exc.BadRequest(resource='edge-lbaas', msg=msg) return binding['edge_id'], binding['edge_app_rule_id'] class EdgeL7PolicyManager(base_mgr.EdgeLoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgeL7PolicyManager, self).__init__(vcns_driver) def _add_app_rule_to_virtual_server(self, edge_id, vse_id, app_rule_id, policy_position): """Add the new nsx application rule to the virtual server""" # Get the current virtual server configuration vse = self.vcns.get_vip(edge_id, vse_id)[1] if 'applicationRuleId' not in vse: vse['applicationRuleId'] = [] # Add the policy (=application rule) in the correct position # (position begins at 1) if len(vse['applicationRuleId']) < policy_position: vse['applicationRuleId'].append(app_rule_id) else: vse['applicationRuleId'].insert(policy_position - 1, app_rule_id) # update the backend with the new configuration self.vcns.update_vip(edge_id, vse_id, vse) def _del_app_rule_from_virtual_server(self, edge_id, vse_id, app_rule_id): """Delete nsx application rule from the virtual server""" # Get the current virtual server configuration vse = self.vcns.get_vip(edge_id, vse_id)[1] if 'applicationRuleId' not in vse: vse['applicationRuleId'] = [] # Remove the rule from the list if (app_rule_id in vse['applicationRuleId']): vse['applicationRuleId'].remove(app_rule_id) # update the backend with the new configuration self.vcns.update_vip(edge_id, vse_id, vse) def _update_app_rule_possition_in_virtual_server(self, edge_id, vse_id, app_rule_id, policy_position): """Move the new nsx application rule to another position""" # Get the current virtual server configuration vse = self.vcns.get_vip(edge_id, vse_id)[1] # delete the policy (= application rule) from the list if app_rule_id in vse['applicationRuleId']: vse['applicationRuleId'].remove(app_rule_id) # Add the policy (=application rule) in the correct position # (position begins at 1) if len(vse['applicationRuleId']) < policy_position: 
vse['applicationRuleId'].append(app_rule_id) else: vse['applicationRuleId'].insert(policy_position - 1, app_rule_id) # update the backend with the new configuration self.vcns.update_vip(edge_id, vse_id, vse) def _get_vse_id(self, context, pol): lb_id = pol.listener.loadbalancer_id list_id = pol.listener.id listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding( context.session, lb_id, list_id) if listener_binding: return listener_binding['vse_id'] @log_helpers.log_method_call def create(self, context, pol): # find out the edge to be updated, by the listener of this policy lb_id = pol.listener.loadbalancer_id lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) if not lb_binding: msg = _( 'No suitable Edge found for listener %s') % pol.listener_id raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) edge_id = lb_binding['edge_id'] app_rule = policy_to_application_rule(pol) app_rule_id = None try: with locking.LockManager.get_lock(edge_id): # create the backend application rule for this policy h = (self.vcns.create_app_rule(edge_id, app_rule))[0] app_rule_id = lb_common.extract_resource_id(h['location']) # add the nsx application rule (neutron policy) to the nsx # virtual server (neutron listener) vse_id = self._get_vse_id(context, pol) if vse_id: self._add_app_rule_to_virtual_server( edge_id, vse_id, app_rule_id, pol.position) except Exception as e: with excutils.save_and_reraise_exception(): self.lbv2_driver.l7policy.failed_completion(context, pol) LOG.error('Failed to create L7policy on edge %(edge)s: ' '%(err)s', {'edge': edge_id, 'err': e}) if app_rule_id: # Failed to add the rule to the vip: delete the rule # from the backend. 
try: self.vcns.delete_app_rule(edge_id, app_rule_id) except Exception: pass # save the nsx application rule id in the DB nsxv_db.add_nsxv_lbaas_l7policy_binding(context.session, pol.id, edge_id, app_rule_id) # complete the transaction self.lbv2_driver.l7policy.successful_completion(context, pol) @log_helpers.log_method_call def update(self, context, old_pol, new_pol): # get the nsx application rule id and edge id from the nsx DB edge_id, app_rule_id = policy_to_edge_and_rule_id(context, new_pol.id) # create the script for the new policy data app_rule = policy_to_application_rule(new_pol) try: with locking.LockManager.get_lock(edge_id): # update the backend application rule for the new policy self.vcns.update_app_rule(edge_id, app_rule_id, app_rule) # if the position changed - update it too if old_pol.position != new_pol.position: vse_id = self._get_vse_id(context, new_pol) if vse_id: self._update_app_rule_possition_in_virtual_server( edge_id, vse_id, app_rule_id, new_pol.position) except Exception as e: with excutils.save_and_reraise_exception(): self.lbv2_driver.l7policy.failed_completion(context, new_pol) LOG.error('Failed to update L7policy on edge %(edge)s: ' '%(err)s', {'edge': edge_id, 'err': e}) # complete the transaction self.lbv2_driver.l7policy.successful_completion(context, new_pol) @log_helpers.log_method_call def delete(self, context, pol): # get the nsx application rule id and edge id from the nsx DB try: edge_id, app_rule_id = policy_to_edge_and_rule_id(context, pol.id) except n_exc.BadRequest: # This is probably a policy that we failed to create properly. 
# We should allow deleting it self.lbv2_driver.l7policy.successful_completion(context, pol, delete=True) return with locking.LockManager.get_lock(edge_id): try: # remove the nsx application rule from the virtual server vse_id = self._get_vse_id(context, pol) if vse_id: self._del_app_rule_from_virtual_server( edge_id, vse_id, app_rule_id) # delete the nsx application rule self.vcns.delete_app_rule(edge_id, app_rule_id) except Exception as e: with excutils.save_and_reraise_exception(): self.lbv2_driver.l7policy.failed_completion(context, pol) LOG.error('Failed to delete L7policy on edge ' '%(edge)s: %(err)s', {'edge': edge_id, 'err': e}) # delete the nsxv db entry nsxv_db.del_nsxv_lbaas_l7policy_binding(context.session, pol.id) # complete the transaction self.lbv2_driver.l7policy.successful_completion(context, pol, delete=True) vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v/v2/edge_loadbalancer_driver_v2.py0000666000175100017510000000323113244523345030445 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import helpers as log_helpers from vmware_nsx.services.lbaas.nsx_v.v2 import healthmon_mgr as hm_mgr from vmware_nsx.services.lbaas.nsx_v.v2 import l7policy_mgr from vmware_nsx.services.lbaas.nsx_v.v2 import l7rule_mgr from vmware_nsx.services.lbaas.nsx_v.v2 import listener_mgr from vmware_nsx.services.lbaas.nsx_v.v2 import loadbalancer_mgr as lb_mgr from vmware_nsx.services.lbaas.nsx_v.v2 import member_mgr from vmware_nsx.services.lbaas.nsx_v.v2 import pool_mgr class EdgeLoadbalancerDriverV2(object): @log_helpers.log_method_call def __init__(self): super(EdgeLoadbalancerDriverV2, self).__init__() self.loadbalancer = lb_mgr.EdgeLoadBalancerManager(self) self.listener = listener_mgr.EdgeListenerManager(self) self.pool = pool_mgr.EdgePoolManager(self) self.member = member_mgr.EdgeMemberManager(self) self.healthmonitor = hm_mgr.EdgeHealthMonitorManager(self) self.l7policy = l7policy_mgr.EdgeL7PolicyManager(self) self.l7rule = l7rule_mgr.EdgeL7RuleManager(self) vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v/v2/member_mgr.py0000666000175100017510000001773613244523345025223 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from neutron_lib import exceptions as n_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxv_exc from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common LOG = logging.getLogger(__name__) class EdgeMemberManager(base_mgr.EdgeLoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgeMemberManager, self).__init__(vcns_driver) self._fw_section_id = None def _get_pool_lb_id(self, member): listener = member.pool.listener if listener: lb_id = listener.loadbalancer_id else: lb_id = member.pool.loadbalancer.id return lb_id @log_helpers.log_method_call def create(self, context, member): lb_id = self._get_pool_lb_id(member) lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) edge_id = lb_binding['edge_id'] pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, member.pool_id) if not pool_binding: self.lbv2_driver.member.failed_completion( context, member) msg = _('Failed to create member on edge: %s. 
' 'Binding not found') % edge_id LOG.error(msg) raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) edge_pool_id = pool_binding['edge_pool_id'] with locking.LockManager.get_lock(edge_id): if not lb_common.is_lb_on_router_edge( context.elevated(), self.core_plugin, edge_id): # Verify that Edge appliance is connected to the member's # subnet (only if this is a dedicated loadbalancer edge) if not lb_common.get_lb_interface( context, self.core_plugin, lb_id, member.subnet_id): lb_common.create_lb_interface( context, self.core_plugin, lb_id, member.subnet_id, member.tenant_id) edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1] edge_member = { 'ipAddress': member.address, 'weight': member.weight, 'port': member.protocol_port, 'monitorPort': member.protocol_port, 'name': lb_common.get_member_id(member.id), 'condition': 'enabled' if member.admin_state_up else 'disabled'} if edge_pool.get('member'): edge_pool['member'].append(edge_member) else: edge_pool['member'] = [edge_member] try: self.vcns.update_pool(edge_id, edge_pool_id, edge_pool) self.lbv2_driver.member.successful_completion(context, member) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.member.failed_completion(context, member) LOG.error('Failed to create member on edge: %s', edge_id) @log_helpers.log_method_call def update(self, context, old_member, new_member): lb_id = self._get_pool_lb_id(new_member) lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding(context.session, lb_id, new_member.pool_id) edge_id = lb_binding['edge_id'] edge_pool_id = pool_binding['edge_pool_id'] edge_member = { 'ipAddress': new_member.address, 'weight': new_member.weight, 'port': new_member.protocol_port, 'monitorPort': new_member.protocol_port, 'name': lb_common.get_member_id(new_member.id), 'condition': 'enabled' if new_member.admin_state_up else 'disabled'} with 
locking.LockManager.get_lock(edge_id): edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1] if edge_pool.get('member'): for i, m in enumerate(edge_pool['member']): if m['name'] == lb_common.get_member_id(new_member.id): edge_pool['member'][i] = edge_member break try: self.vcns.update_pool(edge_id, edge_pool_id, edge_pool) self.lbv2_driver.member.successful_completion( context, new_member) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.member.failed_completion( context, new_member) LOG.error('Failed to update member on edge: %s', edge_id) else: LOG.error('Pool %(pool_id)s on Edge %(edge_id)s has no ' 'members to update', {'pool_id': new_member.pool.id, 'edge_id': edge_id}) @log_helpers.log_method_call def delete(self, context, member): lb_id = self._get_pool_lb_id(member) lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, member.pool_id) edge_id = lb_binding['edge_id'] with locking.LockManager.get_lock(edge_id): # we should remove LB subnet interface if no members are attached # and this is not the LB's VIP interface remove_interface = True if member.subnet_id == member.pool.loadbalancer.vip_subnet_id: remove_interface = False else: for m in member.pool.members: if m.subnet_id == member.subnet_id and m.id != member.id: remove_interface = False if remove_interface: lb_common.delete_lb_interface(context, self.core_plugin, lb_id, member.subnet_id) if not pool_binding: self.lbv2_driver.member.successful_completion( context, member, delete=True) return edge_pool_id = pool_binding['edge_pool_id'] edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1] for i, m in enumerate(edge_pool['member']): if m['name'] == lb_common.get_member_id(member.id): edge_pool['member'].pop(i) break try: self.vcns.update_pool(edge_id, edge_pool_id, edge_pool) self.lbv2_driver.member.successful_completion( context, member, delete=True) 
except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): self.lbv2_driver.member.failed_completion(context, member) LOG.error('Failed to delete member on edge: %s', edge_id) vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v/v2/__init__.py0000666000175100017510000000000013244523345024616 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v/v2/l7rule_mgr.py0000666000175100017510000000524313244523345025154 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx.common import locking from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas.nsx_v.v2 import l7policy_mgr LOG = logging.getLogger(__name__) class EdgeL7RuleManager(base_mgr.EdgeLoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgeL7RuleManager, self).__init__(vcns_driver) def _handle_l7policy_rules_change(self, context, rule, delete=False): # Get the nsx application rule id and edge id edge_id, app_rule_id = l7policy_mgr.policy_to_edge_and_rule_id( context, rule.l7policy_id) # Create the script for the new policy data. # The policy obj on the rule is already updated with the # created/updated/deleted rule. 
app_rule = l7policy_mgr.policy_to_application_rule(rule.policy) try: with locking.LockManager.get_lock(edge_id): # update the backend application rule for the updated policy self.vcns.update_app_rule(edge_id, app_rule_id, app_rule) except Exception as e: with excutils.save_and_reraise_exception(): self.lbv2_driver.l7rule.failed_completion(context, rule) LOG.error('Failed to update L7rules on edge %(edge)s: ' '%(err)s', {'edge': edge_id, 'err': e}) # complete the transaction self.lbv2_driver.l7rule.successful_completion(context, rule, delete=delete) @log_helpers.log_method_call def create(self, context, rule): self._handle_l7policy_rules_change(context, rule) @log_helpers.log_method_call def update(self, context, old_rule, new_rule): self._handle_l7policy_rules_change(context, new_rule) @log_helpers.log_method_call def delete(self, context, rule): self._handle_l7policy_rules_change(context, rule, delete=True) vmware-nsx-12.0.1/vmware_nsx/services/lbaas/nsx_v/__init__.py0000666000175100017510000000000013244523345024267 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/ipam/0000775000175100017510000000000013244524600020670 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/ipam/nsx_v3/0000775000175100017510000000000013244524600022110 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/ipam/nsx_v3/__init__.py0000666000175100017510000000000013244523345024216 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/ipam/nsx_v3/driver.py0000666000175100017510000002443313244523345023772 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_log import log as logging from neutron.ipam import exceptions as ipam_exc from neutron.ipam import requests as ipam_req from vmware_nsx._i18n import _ from vmware_nsx.services.ipam.common import driver as common from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants as error LOG = logging.getLogger(__name__) class Nsxv3IpamDriver(common.NsxAbstractIpamDriver): """IPAM Driver For NSX-V3 networks.""" def __init__(self, subnetpool, context): super(Nsxv3IpamDriver, self).__init__(subnetpool, context) self.nsxlib_ipam = self._nsxlib.ip_pool # Mark which updates to the pool are supported self.support_update_gateway = True self.support_update_pools = True @property def _subnet_class(self): return Nsxv3IpamSubnet def _get_cidr_from_request(self, subnet_request): return "%s/%s" % (subnet_request.subnet_cidr[0], subnet_request.prefixlen) def _get_ranges_from_request(self, subnet_request): if subnet_request.allocation_pools: ranges = [ {'start': str(pool[0]), 'end': str(pool[-1])} for pool in subnet_request.allocation_pools] else: ranges = [] return ranges def allocate_backend_pool(self, subnet_request): """Create a pool on the NSX backend and return its ID""" # name/description length on backend is long, so there is no problem name = 'subnet_' + subnet_request.subnet_id description = 'OS IP pool for subnet ' + subnet_request.subnet_id try: response = self.nsxlib_ipam.create( self._get_cidr_from_request(subnet_request), allocation_ranges=self._get_ranges_from_request( subnet_request), display_name=name, 
description=description, gateway_ip=subnet_request.gateway_ip) nsx_pool_id = response['id'] except Exception as e: #TODO(asarfaty): handle specific errors msg = _('Failed to create subnet IPAM: %s') % e raise ipam_exc.IpamValueInvalid(message=msg) return nsx_pool_id def delete_backend_pool(self, nsx_pool_id): # Because of the delete_subnet flow in the neutron plugin, # some ports still hold IPs from this pool. # Those ports be deleted shortly after this function. # We need to release those IPs before deleting the backed pool, # or else it will fail. pool_allocations = self.nsxlib_ipam.get_allocations(nsx_pool_id) if pool_allocations and pool_allocations.get('result_count'): for allocation in pool_allocations.get('results', []): ip_addr = allocation.get('allocation_id') try: self.nsxlib_ipam.release(nsx_pool_id, ip_addr) except Exception as e: LOG.warning("Failed to release ip %(ip)s from pool " "%(pool)s: %(e)s", {'ip': ip_addr, 'pool': nsx_pool_id, 'e': e}) try: self.nsxlib_ipam.delete(nsx_pool_id) except Exception as e: LOG.error("Failed to delete IPAM from backend: %s", e) # Continue anyway, since this subnet was already removed def update_backend_pool(self, nsx_pool_id, subnet_request): update_args = { 'cidr': self._get_cidr_from_request(subnet_request), 'allocation_ranges': self._get_ranges_from_request(subnet_request), 'gateway_ip': subnet_request.gateway_ip} try: self.nsxlib_ipam.update( nsx_pool_id, **update_args) except nsx_lib_exc.ManagerError as e: LOG.error("NSX IPAM failed to update pool %(id)s: " " %(e)s; code %(code)s", {'e': e, 'id': nsx_pool_id, 'code': e.error_code}) if (e.error_code == error.ERR_CODE_IPAM_RANGE_MODIFY or e.error_code == error.ERR_CODE_IPAM_RANGE_DELETE or e.error_code == error.ERR_CODE_IPAM_RANGE_SHRUNK): # The change is not allowed: already allocated IPs out of # the new range raise ipam_exc.InvalidSubnetRequest( reason=_("Already allocated IPs outside of the updated " "pools")) except Exception as e: # unexpected error msg = 
_('Failed to update subnet IPAM: %s') % e raise ipam_exc.IpamValueInvalid(message=msg) class Nsxv3IpamSubnet(common.NsxAbstractIpamSubnet): """Manage IP addresses for the NSX V3 IPAM driver.""" def __init__(self, subnet_id, nsx_pool_id, ctx, tenant_id): super(Nsxv3IpamSubnet, self).__init__( subnet_id, nsx_pool_id, ctx, tenant_id) self.nsxlib_ipam = self._nsxlib.ip_pool def backend_allocate(self, address_request): try: # allocate a specific IP if isinstance(address_request, ipam_req.SpecificAddressRequest): # This handles both specific and automatic address requests ip_address = str(address_request.address) # If this is the subnet gateway IP - no need to allocate it subnet = self.get_details() if str(subnet.gateway_ip) == ip_address: LOG.info("Skip allocation of gateway-ip for pool %s", self._nsx_pool_id) return ip_address else: # Allocate any free IP ip_address = None response = self.nsxlib_ipam.allocate(self._nsx_pool_id, ip_addr=ip_address) ip_address = response['allocation_id'] except nsx_lib_exc.ManagerError as e: LOG.error("NSX IPAM failed to allocate ip %(ip)s of subnet " "%(id)s: %(e)s; code %(code)s", {'e': e, 'ip': ip_address, 'id': self._subnet_id, 'code': e.error_code}) if e.error_code == error.ERR_CODE_IPAM_POOL_EXHAUSTED: # No more IP addresses available on the pool raise ipam_exc.IpAddressGenerationFailure( subnet_id=self._subnet_id) if e.error_code == error.ERR_CODE_IPAM_SPECIFIC_IP: # The NSX backend does not support allocation of specific IPs # prior to version 2.0. 
msg = (_("NSX-V3 IPAM driver does not support allocation of a " "specific ip %s for port") % ip_address) raise NotImplementedError(msg) if e.error_code == error.ERR_CODE_IPAM_IP_ALLOCATED: # This IP is already in use raise ipam_exc.IpAddressAlreadyAllocated( ip=ip_address, subnet_id=self._subnet_id) if e.error_code == error.ERR_CODE_OBJECT_NOT_FOUND: msg = (_("NSX-V3 IPAM failed to allocate: pool %s was not " "found") % self._nsx_pool_id) raise ipam_exc.IpamValueInvalid(message=msg) else: # another backend error raise ipam_exc.IPAllocationFailed() except Exception as e: LOG.error("NSX IPAM failed to allocate ip %(ip)s of subnet " "%(id)s: %(e)s", {'e': e, 'ip': ip_address, 'id': self._subnet_id}) # handle unexpected failures raise ipam_exc.IPAllocationFailed() return ip_address def backend_deallocate(self, ip_address): # If this is the subnet gateway IP - no need to allocate it subnet = self.get_details() if str(subnet.gateway_ip) == ip_address: LOG.info("Skip deallocation of gateway-ip for pool %s", self._nsx_pool_id) return try: self.nsxlib_ipam.release(self._nsx_pool_id, ip_address) except nsx_lib_exc.ManagerError as e: # fail silently LOG.error("NSX IPAM failed to free ip %(ip)s of subnet " "%(id)s: %(e)s; code %(code)s", {'e': e, 'ip': ip_address, 'id': self._subnet_id, 'code': e.error_code}) def get_details(self): """Return subnet data as a SpecificSubnetRequest""" # get the pool from the backend try: pool_details = self.nsxlib_ipam.get(self._nsx_pool_id) except Exception as e: msg = _('Failed to get details for nsx pool: %(id)s: ' '%(e)s') % {'id': self._nsx_pool_id, 'e': e} raise ipam_exc.IpamValueInvalid(message=msg) first_range = pool_details.get('subnets', [None])[0] if not first_range: msg = _('Failed to get details for nsx pool: %(id)s') % { 'id': self._nsx_pool_id} raise ipam_exc.IpamValueInvalid(message=msg) cidr = first_range.get('cidr') gateway_ip = first_range.get('gateway_ip') pools = [] for subnet in pool_details.get('subnets', []): for ip_range 
in subnet.get('allocation_ranges', []): pools.append(netaddr.IPRange(ip_range.get('start'), ip_range.get('end'))) return ipam_req.SpecificSubnetRequest( self._tenant_id, self._subnet_id, cidr, gateway_ip=gateway_ip, allocation_pools=pools) vmware-nsx-12.0.1/vmware_nsx/services/ipam/__init__.py0000666000175100017510000000000013244523345022776 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/ipam/common/0000775000175100017510000000000013244524600022160 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/ipam/common/__init__.py0000666000175100017510000000000013244523345024266 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/ipam/common/driver.py0000666000175100017510000002462313244523345024043 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import six from oslo_log import log as logging from neutron.ipam import driver as ipam_base from neutron.ipam.drivers.neutrondb_ipam import driver as neutron_driver from neutron.ipam import exceptions as ipam_exc from neutron.ipam import requests as ipam_req from neutron.ipam import subnet_alloc from neutron_lib.plugins import directory from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import projectpluginmap LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class NsxIpamBase(object): @classmethod def get_core_plugin(cls): return directory.get_plugin() @property def _nsxlib(self): p = self.get_core_plugin() if p.is_tvd_plugin(): # get the NSX-T sub-plugin p = p.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_T) elif p.plugin_type() != projectpluginmap.NsxPlugins.NSX_T: # Non NSX-T plugin return return p.nsxlib @property def _vcns(self): p = self.get_core_plugin() if p.is_tvd_plugin(): # get the NSX-V sub-plugin p = p.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) elif p.plugin_type() != projectpluginmap.NsxPlugins.NSX_V: # Non NSX-V plugin return return p.nsx_v.vcns @classmethod def _fetch_subnet(cls, context, id): p = cls.get_core_plugin() return p._get_subnet(context, id) @classmethod def _fetch_network(cls, context, id): p = cls.get_core_plugin() return p.get_network(context, id) class NsxSubnetRequestFactory(ipam_req.SubnetRequestFactory, NsxIpamBase): """Builds request using subnet info, including the network id""" @classmethod def get_request(cls, context, subnet, subnetpool): req = super(NsxSubnetRequestFactory, cls).get_request( context, subnet, subnetpool) # Add the network id into the request if 'network_id' in subnet: req.network_id = subnet['network_id'] return req class NsxAbstractIpamDriver(subnet_alloc.SubnetAllocator, NsxIpamBase): """Abstract IPAM Driver For NSX.""" def __init__(self, subnetpool, context): super(NsxAbstractIpamDriver, self).__init__(subnetpool, context) # in case of unsupported 
networks (or pre-upgrade networks) # the neutron internal driver will be used self.default_ipam = neutron_driver.NeutronDbPool(subnetpool, context) # Mark which updates to the pool are supported # (The NSX-v backend does not support changing the ip pool cidr # or gateway) self.support_update_gateway = False self.support_update_pools = False def _is_supported_net(self, subnet_request): """By default - all networks are supported""" return True def get_subnet_request_factory(self): # override the OOB factory to add the network ID return NsxSubnetRequestFactory @abc.abstractproperty def _subnet_class(self): """Return the class of the subnet that should be used.""" pass def get_subnet(self, subnet_id): """Retrieve an IPAM subnet.""" nsx_pool_id = nsx_db.get_nsx_ipam_pool_for_subnet( self._context.session, subnet_id) if not nsx_pool_id: # Unsupported (or pre-upgrade) network return self.default_ipam.get_subnet(subnet_id) return self._subnet_class.load(subnet_id, nsx_pool_id, self._context) @abc.abstractmethod def allocate_backend_pool(self, subnet_request): """Create a pool on the NSX backend and return its ID""" pass def allocate_subnet(self, subnet_request): """Create an IPAMSubnet object for the provided request.""" if not self._is_supported_net(subnet_request=subnet_request): # fallback to the neutron internal driver implementation return self.default_ipam.allocate_subnet(subnet_request) if self._subnetpool: subnet = super(NsxAbstractIpamDriver, self).allocate_subnet( subnet_request) subnet_request = subnet.get_details() # SubnetRequest must be an instance of SpecificSubnet if not isinstance(subnet_request, ipam_req.SpecificSubnetRequest): raise ipam_exc.InvalidSubnetRequestType( subnet_type=type(subnet_request)) # Add the pool to the NSX backend nsx_pool_id = self.allocate_backend_pool(subnet_request) # Add the pool to the DB nsx_db.add_nsx_ipam_subnet_pool(self._context.session, subnet_request.subnet_id, nsx_pool_id) # return the subnet object return 
self._subnet_class.load(subnet_request.subnet_id, nsx_pool_id, self._context, tenant_id=subnet_request.tenant_id) @abc.abstractmethod def update_backend_pool(self, nsx_pool_id, subnet_request): pass def _raise_update_not_supported(self): msg = _('Changing the subnet range or gateway is not supported') raise ipam_exc.IpamValueInvalid(message=msg) def update_subnet(self, subnet_request): """Update subnet info in the IPAM driver. Do the update only if the specific change is supported by the backend """ nsx_pool_id = nsx_db.get_nsx_ipam_pool_for_subnet( self._context.session, subnet_request.subnet_id) if not nsx_pool_id: # Unsupported (or pre-upgrade) network return self.default_ipam.update_subnet( subnet_request) # get the current pool data curr_subnet = self._subnet_class.load( subnet_request.subnet_id, nsx_pool_id, self._context, tenant_id=subnet_request.tenant_id).get_details() # check if the gateway changed gateway_changed = False if (str(subnet_request.gateway_ip) != str(curr_subnet.gateway_ip)): if not self.support_update_gateway: self._raise_update_not_supported() gateway_changed = True # check that the prefix / cidr / pools changed pools_changed = False if subnet_request.prefixlen != curr_subnet.prefixlen: if not self.support_update_pools: self._raise_update_not_supported() pools_changed = True if subnet_request.subnet_cidr[0] != curr_subnet.subnet_cidr[0]: if not self.support_update_pools: self._raise_update_not_supported() pools_changed = True if (len(subnet_request.allocation_pools) != len(curr_subnet.allocation_pools)): if not self.support_update_pools: self._raise_update_not_supported() pools_changed = True if (len(subnet_request.allocation_pools) != len(curr_subnet.allocation_pools)): if not self.support_update_pools: self._raise_update_not_supported() pools_changed = True else: for pool_ind in range(len(subnet_request.allocation_pools)): pool_req = subnet_request.allocation_pools[pool_ind] curr_pool = curr_subnet.allocation_pools[pool_ind] if 
(pool_req.first != curr_pool.first or pool_req.last != curr_pool.last): if not self.support_update_pools: self._raise_update_not_supported() pools_changed = True # update the relevant attributes at the backend pool if gateway_changed or pools_changed: self.update_backend_pool(nsx_pool_id, subnet_request) @abc.abstractmethod def delete_backend_pool(self, nsx_pool_id): pass def remove_subnet(self, subnet_id): """Delete an IPAM subnet pool from backend & DB.""" nsx_pool_id = nsx_db.get_nsx_ipam_pool_for_subnet( self._context.session, subnet_id) if not nsx_pool_id: # Unsupported (or pre-upgrade) network self.default_ipam.remove_subnet(subnet_id) return # Delete from backend self.delete_backend_pool(nsx_pool_id) # delete pool from DB nsx_db.del_nsx_ipam_subnet_pool(self._context.session, subnet_id, nsx_pool_id) class NsxIpamSubnetManager(object): def __init__(self, neutron_subnet_id): self._neutron_subnet_id = neutron_subnet_id @property def neutron_id(self): return self._neutron_subnet_id class NsxAbstractIpamSubnet(ipam_base.Subnet, NsxIpamBase): """Manage IP addresses for the NSX IPAM driver.""" def __init__(self, subnet_id, nsx_pool_id, ctx, tenant_id): self._subnet_id = subnet_id self._nsx_pool_id = nsx_pool_id self._context = ctx self._tenant_id = tenant_id #TODO(asarfaty): this subnet_manager is currently required by the #pluggable-ipam-driver self.subnet_manager = NsxIpamSubnetManager(self._subnet_id) @classmethod def load(cls, neutron_subnet_id, nsx_pool_id, ctx, tenant_id=None): """Load an IPAM subnet object given its neutron ID.""" return cls(neutron_subnet_id, nsx_pool_id, ctx, tenant_id) def allocate(self, address_request): """Allocate an IP from the pool""" return self.backend_allocate(address_request) @abc.abstractmethod def backend_allocate(self, address_request): pass def deallocate(self, address): """Return an IP to the pool""" self.backend_deallocate(address) @abc.abstractmethod def backend_deallocate(self, address): pass def 
update_allocation_pools(self, pools, cidr): # Not supported pass vmware-nsx-12.0.1/vmware_nsx/services/ipam/nsx_tvd/0000775000175100017510000000000013244524600022355 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/ipam/nsx_tvd/__init__.py0000666000175100017510000000000013244523345024463 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/ipam/nsx_tvd/driver.py0000666000175100017510000000677513244523345024250 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from neutron.ipam import exceptions as ipam_exc from neutron.ipam import subnet_alloc from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.services.ipam.common import driver as common_driver from vmware_nsx.services.ipam.nsx_v import driver as v_driver from vmware_nsx.services.ipam.nsx_v3 import driver as t_driver LOG = logging.getLogger(__name__) class NsxTvdIpamDriver(subnet_alloc.SubnetAllocator, common_driver.NsxIpamBase): """IPAM Driver For NSX-TVD plugin.""" def __init__(self, subnetpool, context): super(NsxTvdIpamDriver, self).__init__(subnetpool, context) # initialize the different drivers self.drivers = {} try: self.drivers[projectpluginmap.NsxPlugins.NSX_T] = ( t_driver.Nsxv3IpamDriver(subnetpool, context)) except Exception as e: LOG.warning("NsxTvdIpamDriver failed to initialize the NSX-T " "driver %s", e) self.drivers[projectpluginmap.NsxPlugins.NSX_T] = None try: self.drivers[projectpluginmap.NsxPlugins.NSX_V] = ( v_driver.NsxvIpamDriver(subnetpool, context)) except Exception as e: LOG.warning("NsxTvdIpamDriver failed to initialize the NSX-V " "driver %s", e) self.drivers[projectpluginmap.NsxPlugins.NSX_V] = None def get_T_driver(self): return self.drivers[projectpluginmap.NsxPlugins.NSX_T] def get_V_driver(self): return self.drivers[projectpluginmap.NsxPlugins.NSX_V] def _get_driver_for_project(self, project): plugin_type = tvd_utils.get_tvd_plugin_type_for_project(project) if not self.drivers.get(plugin_type): LOG.error("Project %(project)s with plugin %(plugin)s has no " "support for IPAM", {'project': project, 'plugin': plugin_type}) raise ipam_exc.IpamValueInvalid( msg="IPAM driver not found") return self.drivers[plugin_type] def allocate_subnet(self, subnet_request): d = self._get_driver_for_project(subnet_request.tenant_id) return d.allocate_subnet(subnet_request) def update_subnet(self, subnet_request): d = 
self._get_driver_for_project(subnet_request.tenant_id) return d.update_subnet(subnet_request) def remove_subnet(self, subnet_id): d = self._get_driver_for_project(self._context.tenant_id) return d.remove_subnet(subnet_id) def get_subnet(self, subnet_id): d = self._get_driver_for_project(self._context.tenant_id) return d.get_subnet(subnet_id) def get_subnet_request_factory(self): d = self._get_driver_for_project(self._context.tenant_id) return d.get_subnet_request_factory() vmware-nsx-12.0.1/vmware_nsx/services/ipam/nsx_v/0000775000175100017510000000000013244524600022025 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/ipam/nsx_v/__init__.py0000666000175100017510000000000013244523345024133 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/ipam/nsx_v/driver.py0000666000175100017510000001764213244523345023713 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import xml.etree.ElementTree as et import netaddr from neutron.ipam import exceptions as ipam_exc from neutron.ipam import requests as ipam_req from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api import validators from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.plugins.nsx_v.vshield.common import constants from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vc_exc from vmware_nsx.services.ipam.common import driver as common LOG = logging.getLogger(__name__) class NsxvIpamDriver(common.NsxAbstractIpamDriver, common.NsxIpamBase): """IPAM Driver For NSX-V external & provider networks.""" def _is_ext_or_provider_net(self, subnet_request): """Return True if the network of the request is external or provider network """ network_id = subnet_request.network_id if network_id: network = self._fetch_network(self._context, network_id) if network.get(extnet_apidef.EXTERNAL): # external network return True if (validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)) or validators.is_attr_set(network.get(pnet.NETWORK_TYPE))): # provider network return True return False def _is_ipv6_subnet(self, subnet_request): """Return True if the network of the request is an ipv6 network""" if isinstance(subnet_request, ipam_req.SpecificSubnetRequest): return subnet_request.subnet_cidr.version == 6 else: if subnet_request.allocation_pools: for pool in subnet_request.allocation_pools: if pool.version == 6: return True return False def _is_supported_net(self, subnet_request): """This driver supports only ipv4 external/provider networks""" return (self._is_ext_or_provider_net(subnet_request) and not self._is_ipv6_subnet(subnet_request)) @property def _subnet_class(self): return NsxvIpamSubnet def allocate_backend_pool(self, subnet_request): """Create a pool on the NSX backend 
and return its ID""" if subnet_request.allocation_pools: ranges = [ {'ipRangeDto': {'startAddress': netaddr.IPAddress(pool.first), 'endAddress': netaddr.IPAddress(pool.last)}} for pool in subnet_request.allocation_pools] else: ranges = [] request = {'ipamAddressPool': # max name length on backend is 255, so there is no problem here {'name': 'subnet_' + subnet_request.subnet_id, 'prefixLength': subnet_request.prefixlen, 'gateway': subnet_request.gateway_ip, 'ipRanges': ranges}} try: response = self._vcns.create_ipam_ip_pool(request) nsx_pool_id = response[1] except vc_exc.VcnsApiException as e: msg = _('Failed to create subnet IPAM: %s') % e raise ipam_exc.IpamValueInvalid(message=msg) return nsx_pool_id def delete_backend_pool(self, nsx_pool_id): try: self._vcns.delete_ipam_ip_pool(nsx_pool_id) except vc_exc.VcnsApiException as e: LOG.error("Failed to delete IPAM from backend: %s", e) # Continue anyway, since this subnet was already removed def update_backend_pool(self, subnet_request): # The NSX-v backend does not support changing the ip pool cidr # or gateway. # If this function is called - there is no need to update the backend pass class NsxvIpamSubnet(common.NsxAbstractIpamSubnet, common.NsxIpamBase): """Manage IP addresses for the NSX-V IPAM driver.""" def _get_vcns_error_code(self, e): """Get the error code out of VcnsApiException""" try: desc = et.fromstring(e.response) return int(desc.find('errorCode').text) except Exception: LOG.error('IPAM pool: Error code not present. 
%s', e.response) def backend_allocate(self, address_request): try: # allocate a specific IP if isinstance(address_request, ipam_req.SpecificAddressRequest): # This handles both specific and automatic address requests ip_address = str(address_request.address) self._vcns.allocate_ipam_ip_from_pool(self._nsx_pool_id, ip_addr=ip_address) else: # Allocate any free IP response = self._vcns.allocate_ipam_ip_from_pool( self._nsx_pool_id)[1] # get the ip from the response root = et.fromstring(response) ip_address = root.find('ipAddress').text except vc_exc.VcnsApiException as e: # handle backend failures error_code = self._get_vcns_error_code(e) if error_code == constants.NSX_ERROR_IPAM_ALLOCATE_IP_USED: # This IP is already in use raise ipam_exc.IpAddressAlreadyAllocated( ip=ip_address, subnet_id=self._subnet_id) if error_code == constants.NSX_ERROR_IPAM_ALLOCATE_ALL_USED: # No more IP addresses available on the pool raise ipam_exc.IpAddressGenerationFailure( subnet_id=self._subnet_id) else: raise ipam_exc.IPAllocationFailed() return ip_address def backend_deallocate(self, address): try: self._vcns.release_ipam_ip_to_pool(self._nsx_pool_id, address) except vc_exc.VcnsApiException as e: LOG.error("NSX IPAM failed to free ip %(ip)s of subnet %(id)s:" " %(e)s", {'e': e.response, 'ip': address, 'id': self._subnet_id}) raise ipam_exc.IpAddressAllocationNotFound( subnet_id=self._subnet_id, ip_address=address) def _get_pool_cidr(self, pool): # rebuild the cidr from the pool range & prefix using the first # range in the pool, because they all should belong to the same cidr cidr = '%s/%s' % (pool['ipRanges'][0]['startAddress'], pool['prefixLength']) # convert to a proper cidr cidr = netaddr.IPNetwork(cidr).cidr return str(cidr) def get_details(self): """Return subnet data as a SpecificSubnetRequest""" # get the pool from the backend pool_details = self._vcns.get_ipam_ip_pool(self._nsx_pool_id)[1] gateway_ip = pool_details['gateway'] # rebuild the cidr from the range & prefix cidr = 
self._get_pool_cidr(pool_details) pools = [] for ip_range in pool_details['ipRanges']: pools.append(netaddr.IPRange(ip_range['startAddress'], ip_range['endAddress'])) return ipam_req.SpecificSubnetRequest( self._tenant_id, self._subnet_id, cidr, gateway_ip=gateway_ip, allocation_pools=pools) vmware-nsx-12.0.1/vmware_nsx/services/l2gateway/0000775000175100017510000000000013244524600021641 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/l2gateway/nsx_v3/0000775000175100017510000000000013244524600023061 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/l2gateway/nsx_v3/__init__.py0000666000175100017510000000000013244523345025167 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/l2gateway/nsx_v3/driver.py0000666000175100017510000003631713244523345024747 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from networking_l2gw.db.l2gateway import l2gateway_db from networking_l2gw.services.l2gateway.common import constants as l2gw_const from networking_l2gw.services.l2gateway import exceptions as l2gw_exc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from neutron.plugins.common import utils as n_utils from neutron_lib.api.definitions import provider_net as providernet from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from vmware_nsx._i18n import _ from vmware_nsx.common import utils as nsx_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import projectpluginmap from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import nsx_constants LOG = logging.getLogger(__name__) class NsxV3Driver(l2gateway_db.L2GatewayMixin): """Class to handle API calls for L2 gateway and NSXv3 backend.""" gateway_resource = l2gw_const.GATEWAY_RESOURCE_NAME def __init__(self, plugin): # Create a default L2 gateway if default_bridge_cluster is # provided in nsx.ini super(NsxV3Driver, self).__init__() self._plugin = plugin LOG.debug("Starting service plugin for NSX L2Gateway") self.subscribe_callback_notifications() LOG.debug("Initialization complete for NSXv3 driver for " "L2 gateway service plugin.") self.__core_plugin = None @property def _core_plugin(self): if not self.__core_plugin: self.__core_plugin = directory.get_plugin() if self.__core_plugin.is_tvd_plugin(): self.__core_plugin = self.__core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_T) return self.__core_plugin def subscribe_callback_notifications(self): registry.subscribe(self._prevent_l2gw_port_delete, resources.PORT, 
events.BEFORE_DELETE) registry.subscribe(self._ensure_default_l2_gateway, resources.PROCESS, events.BEFORE_SPAWN) def _ensure_default_l2_gateway(self, resource, event, trigger, payload=None): """ Create a default logical L2 gateway. Create a logical L2 gateway in the neutron database if the default_bridge_cluster config parameter is set and if it is not previously created. If not set, return. """ def_l2gw_name = cfg.CONF.nsx_v3.default_bridge_cluster # Return if no default_bridge_cluster set in config if not def_l2gw_name: LOG.info("NSX: Default bridge cluster not configured " "in nsx.ini. No default L2 gateway created.") return admin_ctx = context.get_admin_context() def_l2gw_uuid = ( self._core_plugin.nsxlib.bridge_cluster.get_id_by_name_or_id( def_l2gw_name)) # Optimistically create the default L2 gateway in neutron DB device = {'device_name': def_l2gw_uuid, 'interfaces': [{'name': 'default-bridge-cluster'}]} # TODO(asarfaty): Add a default v3 tenant-id to allow TVD filtering def_l2gw = {'name': 'default-l2gw', 'devices': [device]} l2gw_dict = {self.gateway_resource: def_l2gw} self.create_l2_gateway(admin_ctx, l2gw_dict) l2_gateway = super(NsxV3Driver, self).create_l2_gateway(admin_ctx, l2gw_dict) # Verify that only one default L2 gateway is created def_l2gw_exists = False l2gateways = self._get_l2_gateways(admin_ctx) for l2gateway in l2gateways: # Since we ensure L2 gateway is created with only 1 device, we use # the first device in the list. 
if l2gateway['devices'][0]['device_name'] == def_l2gw_uuid: if def_l2gw_exists: LOG.info("Default L2 gateway is already created.") try: # Try deleting this duplicate default L2 gateway self.validate_l2_gateway_for_delete( admin_ctx, l2gateway['id']) super(NsxV3Driver, self).delete_l2_gateway( admin_ctx, l2gateway['id']) except l2gw_exc.L2GatewayInUse: # If the L2 gateway we are trying to delete is in # use then we should delete the L2 gateway which # we just created ensuring there is only one # default L2 gateway in the database. super(NsxV3Driver, self).delete_l2_gateway( admin_ctx, l2_gateway['id']) else: def_l2gw_exists = True return l2_gateway def _prevent_l2gw_port_delete(self, resource, event, trigger, **kwargs): context = kwargs.get('context') port_id = kwargs.get('port_id') port_check = kwargs.get('port_check') if port_check: self.prevent_l2gw_port_deletion(context, port_id) def _validate_device_list(self, devices): # In NSXv3, one L2 gateway is mapped to one bridge cluster. # So we expect only one device to be configured as part of # a L2 gateway resource. The name of the device must be the bridge # cluster's UUID. if len(devices) != 1: msg = _("Only a single device is supported for one L2 gateway") raise n_exc.InvalidInput(error_message=msg) if not uuidutils.is_uuid_like(devices[0]['device_name']): msg = _("Device name must be configured with a UUID") raise n_exc.InvalidInput(error_message=msg) # Make sure the L2GW device ID exists as Bridge Cluster on NSX. try: self._core_plugin.nsxlib.bridge_cluster.get( devices[0]['device_name']) except nsxlib_exc.ResourceNotFound: msg = _("Could not find Bridge Cluster for L2 gateway device " "%s on NSX backend") % devices[0]['device_name'] LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) # One L2 gateway must have only one interface defined. 
interfaces = devices[0].get(l2gw_const.IFACE_NAME_ATTR) if len(interfaces) > 1: msg = _("Maximum of one interface is supported for one L2 gateway") raise n_exc.InvalidInput(error_message=msg) def create_l2_gateway(self, context, l2_gateway): """Create a logical L2 gateway.""" gw = l2_gateway[self.gateway_resource] devices = gw['devices'] self._validate_device_list(devices) def create_l2_gateway_precommit(self, context, l2_gateway): pass def create_l2_gateway_postcommit(self, context, l2_gateway): pass def update_l2_gateway_precommit(self, context, l2_gateway): pass def update_l2_gateway_postcommit(self, context, l2_gateway): pass def delete_l2_gateway(self, context, l2_gateway_id): pass def delete_l2_gateway_precommit(self, context, l2_gateway_id): pass def delete_l2_gateway_postcommit(self, context, l2_gateway_id): pass def _validate_network(self, context, network_id): network = self._core_plugin.get_network(context, network_id) network_type = network.get(providernet.NETWORK_TYPE) # If network is a provider network, verify whether it is of type VXLAN if network_type and network_type != nsx_utils.NsxV3NetworkTypes.VXLAN: msg = (_("Unsupported network type %s for L2 gateway " "connection. 
Only VXLAN network type supported") % network_type) raise n_exc.InvalidInput(error_message=msg) def _validate_segment_id(self, seg_id): if not seg_id: raise l2gw_exc.L2GatewaySegmentationRequired return n_utils.is_valid_vlan_tag(seg_id) def create_l2_gateway_connection(self, context, l2_gateway_connection): gw_connection = l2_gateway_connection.get(self.connection_resource) network_id = gw_connection.get(l2gw_const.NETWORK_ID) self._validate_network(context, network_id) def create_l2_gateway_connection_precommit(self, context, gw_connection): pass def create_l2_gateway_connection_postcommit(self, context, gw_connection): """Create a L2 gateway connection.""" l2gw_id = gw_connection.get(l2gw_const.L2GATEWAY_ID) network_id = gw_connection.get(l2gw_const.NETWORK_ID) devices = self._get_l2_gateway_devices(context, l2gw_id) # In NSXv3, there will be only one device configured per L2 gateway. # The name of the device shall carry the backend bridge cluster's UUID. device_name = devices[0].get('device_name') # The seg-id will be provided either during gateway create or gateway # connection create. l2gateway_db_mixin makes sure that it is # configured one way or the other. seg_id = gw_connection.get(l2gw_const.SEG_ID) if not seg_id: # Seg-id was not passed as part of connection-create. Retrieve # seg-id from L2 gateway's interface. 
interface = self._get_l2_gw_interfaces(context, devices[0]['id']) seg_id = interface[0].get(l2gw_const.SEG_ID) self._validate_segment_id(seg_id) tenant_id = gw_connection['tenant_id'] if context.is_admin and not tenant_id: tenant_id = context.tenant_id gw_connection['tenant_id'] = tenant_id try: tags = self._core_plugin.nsxlib.build_v3_tags_payload( gw_connection, resource_type='os-neutron-l2gw-id', project_name=context.tenant_name) bridge_endpoint = self._core_plugin.nsxlib.bridge_endpoint.create( device_name=device_name, seg_id=seg_id, tags=tags) except nsxlib_exc.ManagerError as e: LOG.exception("Unable to create bridge endpoint, rolling back " "changes on neutron. Exception is %s", e) raise l2gw_exc.L2GatewayServiceDriverError( method='create_l2_gateway_connection_postcommit') #TODO(abhiraut): Consider specifying the name of the port # Create a logical port and connect it to the bridge endpoint. port_dict = {'port': { 'tenant_id': tenant_id, 'network_id': network_id, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'fixed_ips': [], 'device_id': bridge_endpoint['id'], 'device_owner': nsx_constants.BRIDGE_ENDPOINT, 'name': '', }} try: #TODO(abhiraut): Consider adding UT for port check once UTs are # refactored port = self._core_plugin.create_port(context, port_dict, l2gw_port_check=True) # Deallocate IP address from the port. for fixed_ip in port.get('fixed_ips', []): self._core_plugin._delete_ip_allocation(context, network_id, fixed_ip['subnet_id'], fixed_ip['ip_address']) LOG.debug("IP addresses deallocated on port %s", port['id']) except (nsxlib_exc.ManagerError, n_exc.NeutronException): LOG.exception("Unable to create L2 gateway port, " "rolling back changes on neutron") self._core_plugin.nsxlib.bridge_endpoint.delete( bridge_endpoint['id']) raise l2gw_exc.L2GatewayServiceDriverError( method='create_l2_gateway_connection_postcommit') try: # Update neutron's database with the mappings. 
nsx_db.add_l2gw_connection_mapping( session=context.session, connection_id=gw_connection['id'], bridge_endpoint_id=bridge_endpoint['id'], port_id=port['id']) except db_exc.DBError: with excutils.save_and_reraise_exception(): LOG.exception("Unable to add L2 gateway connection " "mappings, rolling back changes on neutron") self._core_plugin.nsxlib.bridge_endpoint.delete( bridge_endpoint['id']) super(NsxV3Driver, self).delete_l2_gateway_connection( context, gw_connection['id']) return gw_connection def delete_l2_gateway_connection_postcommit(self, context, gw_connection): pass def delete_l2_gateway_connection_precommit(self, context, gw_connection): pass def delete_l2_gateway_connection(self, context, gw_connection): """Delete a L2 gateway connection.""" conn_mapping = nsx_db.get_l2gw_connection_mapping( session=context.session, connection_id=gw_connection) bridge_endpoint_id = conn_mapping.get('bridge_endpoint_id') # Delete the logical port from the bridge endpoint. self._core_plugin.delete_port(context=context, port_id=conn_mapping.get('port_id'), l2gw_port_check=False) try: self._core_plugin.nsxlib.bridge_endpoint.delete(bridge_endpoint_id) except nsxlib_exc.ManagerError as e: LOG.exception("Unable to delete bridge endpoint %(id)s on the " "backend due to exc: %(exc)s", {'id': bridge_endpoint_id, 'exc': e}) raise l2gw_exc.L2GatewayServiceDriverError( method='delete_l2_gateway_connection') def prevent_l2gw_port_deletion(self, context, port_id): """Prevent core plugin from deleting L2 gateway port.""" try: port = self._core_plugin.get_port(context, port_id) except n_exc.PortNotFound: return if port['device_owner'] == nsx_constants.BRIDGE_ENDPOINT: reason = _("has device owner %s") % port['device_owner'] raise n_exc.ServicePortInUse(port_id=port_id, reason=reason) vmware-nsx-12.0.1/vmware_nsx/services/l2gateway/__init__.py0000666000175100017510000000000013244523345023747 0ustar 
zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/l2gateway/nsx_tvd/0000775000175100017510000000000013244524600023326 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/l2gateway/nsx_tvd/__init__.py0000666000175100017510000000000013244523345025434 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/l2gateway/nsx_tvd/driver.py0000666000175100017510000001455313244523345025212 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from networking_l2gw.db.l2gateway import l2gateway_db from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from oslo_log import log as logging from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.l2gateway.nsx_v import driver as v_driver from vmware_nsx.services.l2gateway.nsx_v3 import driver as t_driver LOG = logging.getLogger(__name__) class NsxTvdL2GatewayDriver(l2gateway_db.L2GatewayMixin): """Class to handle API calls for L2 gateway and NSX-TVD plugin wrapper.""" def __init__(self, plugin): super(NsxTvdL2GatewayDriver, self).__init__() self._plugin = plugin # supported drivers: self.drivers = {} try: self.drivers[projectpluginmap.NsxPlugins.NSX_T] = ( t_driver.NsxV3Driver(plugin)) except Exception: LOG.warning("NsxTvdL2GatewayDriver failed to initialize the NSX-T " "driver") self.drivers[projectpluginmap.NsxPlugins.NSX_T] = None try: self.drivers[projectpluginmap.NsxPlugins.NSX_V] = ( v_driver.NsxvL2GatewayDriver(plugin)) except Exception: LOG.warning("NsxTvdL2GatewayDriver failed to initialize the NSX-V " "driver") self.drivers[projectpluginmap.NsxPlugins.NSX_V] = None def _get_driver_for_project(self, context, project): """Get the l2gw driver by the plugin of the project""" mapping = nsx_db.get_project_plugin_mapping( context.session, project) if mapping: plugin_type = mapping['plugin'] else: msg = _("Couldn't find the plugin project %s is using") % project raise n_exc.InvalidInput(error_message=msg) if plugin_type not in self.drivers: msg = (_("Project %(project)s with plugin %(plugin)s has no " "support for L2GW") % {'project': project, 'plugin': plugin_type}) raise n_exc.InvalidInput(error_message=msg) # make sure the core plugin is supported core_plugin = directory.get_plugin() if not core_plugin.get_plugin_by_type(plugin_type): msg = (_("Plugin %(plugin)s for project %(project)s is not " "supported by the core plugin") % {'project': project, 'plugin': 
plugin_type}) raise n_exc.InvalidInput(error_message=msg) return self.drivers[plugin_type] def create_l2_gateway(self, context, l2_gateway): d = self._get_driver_for_project( context, l2_gateway['l2_gateway']['tenant_id']) return d.create_l2_gateway(context, l2_gateway) def create_l2_gateway_precommit(self, context, l2_gateway): # Not implemented by any of the plugins pass def create_l2_gateway_postcommit(self, context, l2_gateway): # Not implemented by any of the plugins pass def update_l2_gateway(self, context, l2_gateway): # Not implemented by any of the plugins pass def update_l2_gateway_precommit(self, context, l2_gateway): # Not implemented by any of the plugins pass def update_l2_gateway_postcommit(self, context, l2_gateway): # Not implemented by any of the plugins pass def create_l2_gateway_connection(self, context, l2_gateway_connection): d = self._get_driver_for_project( context, l2_gateway_connection['l2_gateway_connection']['tenant_id']) return d.create_l2_gateway_connection(context, l2_gateway_connection) def create_l2_gateway_connection_precommit(self, contex, gw_connection): # Not implemented by any of the plugins pass def create_l2_gateway_connection_postcommit(self, context, gw_connection): d = self._get_driver_for_project(context, gw_connection['tenant_id']) return d.create_l2_gateway_connection_postcommit( context, gw_connection) def _get_gw_connection_driver(self, context, l2gw_connection_id): l2gw_conn = self._plugin._get_l2_gateway_connection( context, l2gw_connection_id) return self._get_driver_for_project(context, l2gw_conn.tenant_id) def delete_l2_gateway_connection(self, context, l2_gateway_connection_id): d = self._get_gw_connection_driver(context, l2_gateway_connection_id) return d.delete_l2_gateway_connection( context, l2_gateway_connection_id) def delete_l2_gateway_connection_precommit(self, context, l2_gateway_connection): # Not implemented by any of the plugins pass def delete_l2_gateway_connection_postcommit(self, context, 
l2_gateway_connection_id): # Not implemented by any of the plugins #Note(asarfaty): in postcommit the l2_gateway_connection was already # deleted so we cannot decide on the plugin by the project of the # connection. pass def delete_l2_gateway(self, context, l2_gateway_id): l2gw = self._plugin._get_l2_gateway(context, l2_gateway_id) d = self._get_driver_for_project( context, l2gw['tenant_id']) return d.delete_l2_gateway(context, l2_gateway_id) def delete_l2_gateway_precommit(self, context, l2_gateway): # Not implemented by any of the plugins pass def delete_l2_gateway_postcommit(self, context, l2_gateway): # Not implemented by any of the plugins #Note(asarfaty): in postcommit the l2_gateway was already deleted # so we cannot decide on the plugin by the project of the gw. pass vmware-nsx-12.0.1/vmware_nsx/services/l2gateway/nsx_tvd/plugin.py0000666000175100017510000000176013244523345025211 0ustar zuulzuul00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from networking_l2gw.services.l2gateway import plugin from vmware_nsx.plugins.nsx import utils as tvd_utils @tvd_utils.filter_plugins class L2GatewayPlugin(plugin.L2GatewayPlugin): """NSX-TV plugin for L2GW. 
This plugin adds separation between T/V instances """ methods_to_separate = ['get_l2_gateways', 'get_l2_gateway_connections'] vmware-nsx-12.0.1/vmware_nsx/services/l2gateway/nsx_v/0000775000175100017510000000000013244524600022776 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/l2gateway/nsx_v/__init__.py0000666000175100017510000000000013244523345025104 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/l2gateway/nsx_v/driver.py0000666000175100017510000002244513244523345024661 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from networking_l2gw.db.l2gateway import l2gateway_db from networking_l2gw.db.l2gateway import l2gateway_models as models from networking_l2gw.services.l2gateway.common import constants as l2gw_const from networking_l2gw.services.l2gateway import exceptions as l2gw_exc from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from oslo_log import log as logging from oslo_utils import uuidutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import nsxv_constants from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import exceptions LOG = logging.getLogger(__name__) class NsxvL2GatewayDriver(l2gateway_db.L2GatewayMixin): """Class to handle API calls for L2 gateway and NSXv backend.""" def __init__(self, plugin): super(NsxvL2GatewayDriver, self).__init__() self._plugin = plugin self.__core_plugin = None @property def _core_plugin(self): if not self.__core_plugin: self.__core_plugin = directory.get_plugin() if self.__core_plugin.is_tvd_plugin(): self.__core_plugin = self.__core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) return self.__core_plugin @property def _nsxv(self): return self._core_plugin.nsx_v @property def _edge_manager(self): return self._core_plugin.edge_manager def _validate_device_list(self, devices): # In NSX-v, one L2 gateway is mapped to one DLR. # So we expect only one device to be configured as part of # a L2 gateway resource. 
if len(devices) != 1: msg = _("Only a single device is supported for one L2 gateway") raise n_exc.InvalidInput(error_message=msg) def _get_l2gateway_interface(self, context, interface_name): """Get all l2gateway_interfaces_by interface_name.""" session = context.session with session.begin(): return session.query(models.L2GatewayInterface).filter_by( interface_name=interface_name).all() def _validate_interface_list(self, context, interfaces): # In NSXv, interface is mapped to a vDS VLAN port group. # Since HA is not supported, only one interface is expected if len(interfaces) != 1: msg = _("Only a single interface is supported for one L2 gateway") raise n_exc.InvalidInput(error_message=msg) if not self._nsxv.vcns.validate_network(interfaces[0]['name']): msg = _("Configured interface not found") raise n_exc.InvalidInput(error_message=msg) interface = self._get_l2gateway_interface(context, interfaces[0]['name']) if interface: msg = _("%s is already used.") % interfaces[0]['name'] raise n_exc.InvalidInput(error_message=msg) def create_l2_gateway_precommit(self, context, l2_gateway): pass def create_l2_gateway_postcommit(self, context, l2_gateway): pass def create_l2_gateway(self, context, l2_gateway): """Create a logical L2 gateway.""" self._admin_check(context, 'CREATE') gw = l2_gateway[self.gateway_resource] devices = gw['devices'] self._validate_device_list(devices) interfaces = devices[0]['interfaces'] self._validate_interface_list(context, interfaces) # Create a dedicated DLR try: edge_id = self._create_l2_gateway_edge(context) except nsx_exc.NsxL2GWDeviceNotFound: LOG.exception("Failed to create backend device " "for L2 gateway") raise devices[0]['device_name'] = edge_id l2_gateway[self.gateway_resource]['devices'] = devices return def update_l2_gateway_precommit(self, context, l2_gateway): pass def update_l2_gateway_postcommit(self, context, l2_gateway): pass def _create_l2_gateway_edge(self, context): # Create a dedicated DLR lrouter = {'name': 
nsxv_constants.L2_GATEWAY_EDGE, 'id': uuidutils.generate_uuid()} # Create the router on the default availability zone availability_zone = (nsx_az.NsxVAvailabilityZones(). get_default_availability_zone()) self._edge_manager.create_lrouter(context, lrouter, lswitch=None, dist=True, availability_zone=availability_zone) edge_binding = nsxv_db.get_nsxv_router_binding(context.session, lrouter['id']) if not edge_binding: raise nsx_exc.NsxL2GWDeviceNotFound() # Enable edge HA on the DLR if availability_zone.edge_ha: edge_id = edge_binding['edge_id'] self._edge_manager.nsxv_manager.update_edge_ha(edge_id) return edge_binding['edge_id'] def _get_device(self, context, l2gw_id): devices = self._get_l2_gateway_devices(context, l2gw_id) return devices[0] def create_l2_gateway_connection_precommit(self, contex, gw_connection): pass def create_l2_gateway_connection_postcommit(self, context, gw_connection): network_id = gw_connection.get('network_id') virtual_wire = nsx_db.get_nsx_switch_ids(context.session, network_id) # In NSX-v, there will be only one device configured per L2 gateway. # The name of the device shall carry the backend DLR. 
l2gw_id = gw_connection.get(l2gw_const.L2GATEWAY_ID) device = self._get_device(context, l2gw_id) device_name = device.get('device_name') device_id = device.get('id') interface = self._get_l2_gw_interfaces(context, device_id) interface_name = interface[0].get("interface_name") bridge_name = "bridge-" + uuidutils.generate_uuid() bridge_dict = {"bridges": {"bridge": {"name": bridge_name, "virtualWire": virtual_wire[0], "dvportGroup": interface_name}}} try: self._nsxv.create_bridge(device_name, bridge_dict) except exceptions.VcnsApiException: LOG.exception("Failed to update NSX, " "rolling back changes on neutron.") raise l2gw_exc.L2GatewayServiceDriverError( method='create_l2_gateway_connection_postcommit') return def create_l2_gateway_connection(self, context, l2_gateway_connection): """Create a L2 gateway connection.""" gw_connection = l2_gateway_connection.get(l2gw_const. CONNECTION_RESOURCE_NAME) l2gw_id = gw_connection.get(l2gw_const.L2GATEWAY_ID) gw_db = self._get_l2_gateway(context, l2gw_id) if gw_db.network_connections: raise nsx_exc.NsxL2GWInUse(gateway_id=l2gw_id) return def delete_l2_gateway_connection_precommit(self, context, l2_gateway_connection): pass def delete_l2_gateway_connection_postcommit(self, context, l2_gateway_connection): pass def delete_l2_gateway_connection(self, context, l2_gateway_connection): """Delete a L2 gateway connection.""" self._admin_check(context, 'DELETE') gw_connection = self.get_l2_gateway_connection(context, l2_gateway_connection) if not gw_connection: raise l2gw_exc.L2GatewayConnectionNotFound( l2_gateway_connection) l2gw_id = gw_connection.get(l2gw_const.L2GATEWAY_ID) device = self._get_device(context, l2gw_id) device_name = device.get('device_name') self._nsxv.delete_bridge(device_name) def delete_l2_gateway(self, context, l2_gateway): """Delete a L2 gateway.""" self._admin_check(context, 'DELETE') device = self._get_device(context, l2_gateway) edge_id = device.get('device_name') rtr_binding = 
nsxv_db.get_nsxv_router_binding_by_edge( context.session, edge_id) if rtr_binding: self._edge_manager.delete_lrouter(context, rtr_binding['router_id']) def delete_l2_gateway_precommit(self, context, l2_gateway): pass def delete_l2_gateway_postcommit(self, context, l2_gateway): pass vmware-nsx-12.0.1/vmware_nsx/services/__init__.py0000666000175100017510000000000013244523345022050 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/dynamic_routing/0000775000175100017510000000000013244524600023135 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/dynamic_routing/__init__.py0000666000175100017510000000000013244523345025243 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/dynamic_routing/bgp_plugin.py0000666000175100017510000004226013244523345025650 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_dynamic_routing.db import bgp_db from neutron_dynamic_routing.extensions import bgp as bgp_ext from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context as n_context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.services import base as service_base from oslo_log import log as logging from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import edge_service_gateway_bgp_peer as ext_esg from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.services.dynamic_routing.nsx_v import driver as nsxv_driver LOG = logging.getLogger(__name__) PLUGIN_NAME = bgp_ext.BGP_EXT_ALIAS + '_nsx_svc_plugin' @tvd_utils.filter_plugins class NSXBgpPlugin(service_base.ServicePluginBase, bgp_db.BgpDbMixin): """BGP service plugin for NSX-V as well as TVD plugins. Currently only the nsx-v is supported. other plugins will be refused. 
""" supported_extension_aliases = [bgp_ext.BGP_EXT_ALIAS, ext_esg.ESG_BGP_PEER_EXT_ALIAS] methods_to_separate = ['get_bgp_speakers', 'get_bgp_peers'] def __init__(self): super(NSXBgpPlugin, self).__init__() self._core_plugin = directory.get_plugin() # initialize the supported drivers (currently only NSX-v) self.drivers = {} try: self.drivers[projectpluginmap.NsxPlugins.NSX_V] = ( nsxv_driver.NSXvBgpDriver(self)) except Exception: # No driver found LOG.warning("NSXBgpPlugin failed to initialize the NSX-V driver") self.drivers[projectpluginmap.NsxPlugins.NSX_V] = None self._register_callbacks() def get_plugin_name(self): return PLUGIN_NAME def get_plugin_type(self): return bgp_ext.BGP_EXT_ALIAS def get_plugin_description(self): """returns string description of the plugin.""" return ("BGP dynamic routing service for announcement of next-hops " "for project networks, floating IP's, and DVR host routes.") def _register_callbacks(self): registry.subscribe(self.router_interface_callback, resources.ROUTER_INTERFACE, events.AFTER_CREATE) registry.subscribe(self.router_interface_callback, resources.ROUTER_INTERFACE, events.AFTER_DELETE) registry.subscribe(self.router_gateway_callback, resources.ROUTER_GATEWAY, events.AFTER_UPDATE) registry.subscribe(self.router_gateway_callback, resources.ROUTER_GATEWAY, events.AFTER_DELETE) registry.subscribe(self._after_service_edge_create_callback, nsxv_constants.SERVICE_EDGE, events.AFTER_CREATE) registry.subscribe(self._before_service_edge_delete_callback, nsxv_constants.SERVICE_EDGE, events.BEFORE_DELETE) def _get_driver_by_project(self, context, project): # Check if the current project id has a matching driver # Currently only NSX-V is supported if self._core_plugin.is_tvd_plugin(): plugin_type = self._core_plugin.get_plugin_type_from_project( context, project) else: plugin_type = self._core_plugin.plugin_type() if not self.drivers.get(plugin_type): msg = (_("Project %(project)s with plugin %(plugin)s has no " "support for dynamic 
routing") % { 'project': project, 'plugin': plugin_type}) raise n_exc.InvalidInput(error_message=msg) return self.drivers[plugin_type] def _get_driver_by_speaker(self, context, bgp_speaker_id): try: speaker = self.get_bgp_speaker(context, bgp_speaker_id) except Exception: msg = _("BGP speaker %s could not be found") % bgp_speaker_id raise n_exc.BadRequest(resource=bgp_ext.BGP_SPEAKER_RESOURCE_NAME, msg=msg) return self._get_driver_by_project(context, speaker['tenant_id']) def create_bgp_speaker(self, context, bgp_speaker): driver = self._get_driver_by_project( context, bgp_speaker['bgp_speaker']['tenant_id']) driver.create_bgp_speaker(context, bgp_speaker) return super(NSXBgpPlugin, self).create_bgp_speaker(context, bgp_speaker) def update_bgp_speaker(self, context, bgp_speaker_id, bgp_speaker): driver = self._get_driver_by_speaker(context, bgp_speaker_id) with locking.LockManager.get_lock(str(bgp_speaker_id)): driver.update_bgp_speaker(context, bgp_speaker_id, bgp_speaker) # TBD(roeyc): rolling back changes on edges base class call failed. 
return super(NSXBgpPlugin, self).update_bgp_speaker( context, bgp_speaker_id, bgp_speaker) def delete_bgp_speaker(self, context, bgp_speaker_id): driver = self._get_driver_by_speaker(context, bgp_speaker_id) with locking.LockManager.get_lock(str(bgp_speaker_id)): driver.delete_bgp_speaker(context, bgp_speaker_id) super(NSXBgpPlugin, self).delete_bgp_speaker(context, bgp_speaker_id) def _add_esg_peer_info(self, context, peer): # TODO(asarfaty): only if nsxv driver, or do it in the driver itself binding = nsxv_db.get_nsxv_bgp_peer_edge_binding(context.session, peer['id']) if binding: peer['esg_id'] = binding['edge_id'] def get_bgp_peer(self, context, bgp_peer_id, fields=None): peer = super(NSXBgpPlugin, self).get_bgp_peer(context, bgp_peer_id, fields) if not fields or 'esg_id' in fields: self._add_esg_peer_info(context, peer) return peer def get_bgp_peers_by_bgp_speaker(self, context, bgp_speaker_id, fields=None): ret = super(NSXBgpPlugin, self).get_bgp_peers_by_bgp_speaker( context, bgp_speaker_id, fields=fields) if fields is None or 'esg_id' in fields: for peer in ret: self._add_esg_peer_info(context, peer) return ret def _get_driver_by_peer(self, context, bgp_peer_id): try: peer = self.get_bgp_peer(context, bgp_peer_id) except Exception: raise bgp_ext.BgpPeerNotFound(id=bgp_peer_id) return self._get_driver_by_project(context, peer['tenant_id']) def create_bgp_peer(self, context, bgp_peer): driver = self._get_driver_by_project( context, bgp_peer['bgp_peer']['tenant_id']) driver.create_bgp_peer(context, bgp_peer) peer = super(NSXBgpPlugin, self).create_bgp_peer(context, bgp_peer) # TODO(asarfaty): only if nsxv driver, or do it in the driver itself esg_id = bgp_peer['bgp_peer'].get('esg_id') if esg_id: nsxv_db.add_nsxv_bgp_peer_edge_binding(context.session, peer['id'], esg_id) peer['esg_id'] = esg_id return peer def update_bgp_peer(self, context, bgp_peer_id, bgp_peer): driver = self._get_driver_by_peer(context, bgp_peer_id) super(NSXBgpPlugin, 
self).update_bgp_peer(context, bgp_peer_id, bgp_peer) driver.update_bgp_peer(context, bgp_peer_id, bgp_peer) return self.get_bgp_peer(context, bgp_peer_id) def delete_bgp_peer(self, context, bgp_peer_id): driver = self._get_driver_by_peer(context, bgp_peer_id) bgp_peer_info = {'bgp_peer_id': bgp_peer_id} bgp_speaker_ids = driver._get_bgp_speakers_by_bgp_peer( context, bgp_peer_id) for speaker_id in bgp_speaker_ids: try: self.remove_bgp_peer(context, speaker_id, bgp_peer_info) except bgp_ext.BgpSpeakerPeerNotAssociated: LOG.debug("Couldn't find bgp speaker %s peer binding while " "deleting bgp peer %s", speaker_id, bgp_peer_id) super(NSXBgpPlugin, self).delete_bgp_peer(context, bgp_peer_id) def add_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info): # speaker & peer must belong to the same driver if not bgp_peer_info.get('bgp_peer_id'): msg = _("bgp_peer_id must be specified") raise n_exc.BadRequest(resource='bgp-peer', msg=msg) peer_driver = self._get_driver_by_peer( context, bgp_peer_info['bgp_peer_id']) speaker_driver = self._get_driver_by_speaker(context, bgp_speaker_id) if peer_driver != speaker_driver: msg = _("Peer and Speaker must belong to the same plugin") raise n_exc.InvalidInput(error_message=msg) with locking.LockManager.get_lock(str(bgp_speaker_id)): speaker_driver.add_bgp_peer(context, bgp_speaker_id, bgp_peer_info) return super(NSXBgpPlugin, self).add_bgp_peer(context, bgp_speaker_id, bgp_peer_info) def remove_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info): driver = self._get_driver_by_speaker(context, bgp_speaker_id) with locking.LockManager.get_lock(str(bgp_speaker_id)): ret = super(NSXBgpPlugin, self).remove_bgp_peer( context, bgp_speaker_id, bgp_peer_info) driver.remove_bgp_peer(context, bgp_speaker_id, bgp_peer_info) return ret def _validate_network_plugin( self, context, network_info, plugin_type=projectpluginmap.NsxPlugins.NSX_V): """Make sure the network belongs to the NSX0-V plugin""" if not network_info.get('network_id'): msg = 
_("network_id must be specified") raise n_exc.BadRequest(resource=bgp_ext.BGP_SPEAKER_RESOURCE_NAME, msg=msg) net_id = network_info['network_id'] p = self._core_plugin._get_plugin_from_net_id(context, net_id) if p.plugin_type() != plugin_type: msg = (_('Network should belong to the %s plugin as the bgp ' 'speaker') % plugin_type) raise n_exc.InvalidInput(error_message=msg) def add_gateway_network(self, context, bgp_speaker_id, network_info): driver = self._get_driver_by_speaker(context, bgp_speaker_id) if self._core_plugin.is_tvd_plugin(): # The plugin of the network and speaker must be the same self._validate_network_plugin(context, network_info) with locking.LockManager.get_lock(str(bgp_speaker_id)): driver.add_gateway_network(context, bgp_speaker_id, network_info) return super(NSXBgpPlugin, self).add_gateway_network( context, bgp_speaker_id, network_info) def remove_gateway_network(self, context, bgp_speaker_id, network_info): driver = self._get_driver_by_speaker(context, bgp_speaker_id) with locking.LockManager.get_lock(str(bgp_speaker_id)): super(NSXBgpPlugin, self).remove_gateway_network( context, bgp_speaker_id, network_info) driver.remove_gateway_network(context, bgp_speaker_id, network_info) def get_advertised_routes(self, context, bgp_speaker_id): driver = self._get_driver_by_speaker(context, bgp_speaker_id) return driver.get_advertised_routes(context, bgp_speaker_id) def router_interface_callback(self, resource, event, trigger, **kwargs): if not kwargs['network_id']: # No GW network, hence no BGP speaker associated return context = kwargs['context'].elevated() router_id = kwargs['router_id'] subnets = kwargs.get('subnets') network_id = kwargs['network_id'] port = kwargs['port'] speakers = self._bgp_speakers_for_gateway_network(context, network_id) for speaker in speakers: speaker_id = speaker.id with locking.LockManager.get_lock(str(speaker_id)): speaker = self.get_bgp_speaker(context, speaker_id) driver = self._get_driver_by_project( context, 
speaker['tenant_id']) if network_id not in speaker['networks']: continue if event == events.AFTER_CREATE: driver.advertise_subnet(context, speaker_id, router_id, subnets[0]) if event == events.AFTER_DELETE: subnet_id = port['fixed_ips'][0]['subnet_id'] driver.withdraw_subnet(context, speaker_id, router_id, subnet_id) def router_gateway_callback(self, resource, event, trigger, **kwargs): context = kwargs.get('context') or n_context.get_admin_context() context = context.elevated() router_id = kwargs['router_id'] network_id = kwargs['network_id'] speakers = self._bgp_speakers_for_gateway_network(context, network_id) for speaker in speakers: speaker_id = speaker.id driver = self._get_driver_by_project( context, speaker['tenant_id']) with locking.LockManager.get_lock(str(speaker_id)): speaker = self.get_bgp_speaker(context, speaker_id) if network_id not in speaker['networks']: continue if event == events.AFTER_DELETE: gw_ips = kwargs['gateway_ips'] driver.disable_bgp_on_router(context, speaker, router_id, gw_ips[0]) if event == events.AFTER_UPDATE: updated_port = kwargs['updated_port'] router = kwargs['router'] driver.process_router_gw_port_update( context, speaker, router, updated_port) def _before_service_edge_delete_callback(self, resource, event, trigger, **kwargs): context = kwargs['context'].elevated() router = kwargs['router'] ext_net_id = router.gw_port and router.gw_port['network_id'] gw_ip = router.gw_port and router.gw_port['fixed_ips'][0]['ip_address'] edge_id = kwargs.get('edge_id') speakers = self._bgp_speakers_for_gateway_network(context, ext_net_id) for speaker in speakers: driver = self._get_driver_by_project( context, speaker['tenant_id']) with locking.LockManager.get_lock(speaker.id): speaker = self.get_bgp_speaker(context, speaker.id) if ext_net_id not in speaker['networks']: continue driver.disable_bgp_on_router(context, speaker, router['id'], gw_ip, edge_id) def _after_service_edge_create_callback(self, resource, event, trigger, **kwargs): context 
= kwargs['context'].elevated() router = kwargs['router'] ext_net_id = router.gw_port and router.gw_port['network_id'] speakers = self._bgp_speakers_for_gateway_network(context, ext_net_id) for speaker in speakers: driver = self._get_driver_by_project( context, speaker['tenant_id']) with locking.LockManager.get_lock(speaker.id): speaker = self.get_bgp_speaker(context, speaker.id) if ext_net_id not in speaker['networks']: continue driver.enable_bgp_on_router(context, speaker, router['id']) class NSXvBgpPlugin(NSXBgpPlugin): """Defined for backwards compatibility only""" pass vmware-nsx-12.0.1/vmware_nsx/services/dynamic_routing/nsx_v/0000775000175100017510000000000013244524600024272 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/dynamic_routing/nsx_v/__init__.py0000666000175100017510000000000013244523345026400 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/dynamic_routing/nsx_v/driver.py0000666000175100017510000010065513244523345026155 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from neutron_dynamic_routing.extensions import bgp as bgp_ext from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron_lib.api.definitions import address_scope from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib import constants as n_const from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import edge_service_gateway_bgp_peer as ext_esg_peer from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vcns_exc LOG = logging.getLogger(__name__) def ip_prefix(name, ip_address): return {'ipPrefix': {'name': name, 'ipAddress': ip_address}} def redistribution_rule(advertise_static_routes, prefix_name, action='permit'): rule = { 'prefixName': prefix_name, 'action': action, 'from': { 'ospf': False, 'bgp': False, 'connected': not advertise_static_routes, 'static': advertise_static_routes } } return {'rule': rule} def _get_bgp_neighbour(ip_address, remote_as, password, direction): bgp_filter = {'bgpFilter': [{'direction': direction, 'action': 'permit'}]} nbr = { 'ipAddress': ip_address, 'remoteAS': remote_as, 'bgpFilters': bgp_filter, 'holdDownTimer': cfg.CONF.nsxv.bgp_neighbour_hold_down_timer, 'keepAliveTimer': cfg.CONF.nsxv.bgp_neighbour_keep_alive_timer } if password: nbr['password'] = password return {'bgpNeighbour': nbr} def bgp_neighbour_from_peer(bgp_peer): return _get_bgp_neighbour(bgp_peer['peer_ip'], bgp_peer['remote_as'], bgp_peer['password'], direction='out') def gw_bgp_neighbour(ip_address, remote_as, password): return _get_bgp_neighbour(ip_address, remote_as, password, direction='in') class NSXvBgpDriver(object): """Class driver to address the 
neutron_dynamic_routing API""" def __init__(self, plugin): super(NSXvBgpDriver, self).__init__() self._plugin = plugin self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) if not self._core_plugin: err_msg = _("NSXv BGP cannot work without the NSX-V core plugin") raise n_exc.InvalidInput(error_message=err_msg) self._nsxv = self._core_plugin.nsx_v self._edge_manager = self._core_plugin.edge_manager def prefix_name(self, subnet_id): return 'subnet-%s' % subnet_id def _get_router_edge_info(self, context, router_id): edge_binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if not edge_binding: return None, None # Indicates which routes should be advertised - connected or static. advertise_static_routes = False if edge_binding['edge_type'] != nsxv_constants.SERVICE_EDGE: # Distributed router plr_id = self._edge_manager.get_plr_by_tlr_id(context, router_id) edge_binding = nsxv_db.get_nsxv_router_binding(context.session, plr_id) if not edge_binding: # Distributed router isn't bound to plr return None, None # PLR for distributed router, advertise static routes. 
advertise_static_routes = True return edge_binding['edge_id'], advertise_static_routes def get_advertised_routes(self, context, bgp_speaker_id): routes = [] bgp_speaker = self._plugin.get_bgp_speaker(context, bgp_speaker_id) edge_router_dict = ( self._get_dynamic_routing_edge_list(context, bgp_speaker['networks'][0], bgp_speaker_id)) for edge_id, edge_router_config in edge_router_dict.items(): bgp_identifier = edge_router_config['bgp_identifier'] subnets = self._query_tenant_subnets( context, edge_router_config['no_snat_routers']) routes.extend([(subnet['cidr'], bgp_identifier) for subnet in subnets]) routes = self._plugin._make_advertised_routes_list(routes) return self._plugin._make_advertised_routes_dict(routes) def _get_dynamic_routing_edge_list(self, context, gateway_network_id, bgp_speaker_id): # Filter the routers attached this network as gateway interface filters = {'network_id': [gateway_network_id], 'device_owner': [n_const.DEVICE_OWNER_ROUTER_GW]} fields = ['device_id', 'fixed_ips'] gateway_ports = self._core_plugin.get_ports(context, filters=filters, fields=fields) bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings( context.session, bgp_speaker_id) binding_info = {bgp_binding['edge_id']: bgp_binding['bgp_identifier'] for bgp_binding in bgp_bindings} edge_router_dict = {} for port in gateway_ports: router_id = port['device_id'] router = self._core_plugin._get_router(context, router_id) edge_id, advertise_static_routes = ( self._get_router_edge_info(context, router_id)) if not edge_id: # Shared router is not attached on any edge continue if edge_id not in edge_router_dict: bgp_identifier = binding_info.get( edge_id, port['fixed_ips'][0]['ip_address']) edge_router_dict[edge_id] = {'no_snat_routers': [], 'bgp_identifier': bgp_identifier, 'advertise_static_routes': advertise_static_routes} if not router.enable_snat: edge_router_dict[edge_id]['no_snat_routers'].append(router_id) return edge_router_dict def _get_md_proxy_for_router(self, context, router_id): 
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) md_proxy = None if binding: az_name = binding['availability_zone'] md_proxy = self._core_plugin.get_metadata_proxy_handler( az_name) return md_proxy def _query_tenant_subnets(self, context, router_ids): # Query subnets attached to all of routers attached to same edge subnets = [] for router_id in router_ids: filters = {'device_id': [router_id], 'device_owner': [n_const.DEVICE_OWNER_ROUTER_INTF]} int_ports = self._core_plugin.get_ports(context, filters=filters, fields=['fixed_ips']) # We need to skip metadata subnets md_proxy = self._get_md_proxy_for_router(context, router_id) for p in int_ports: subnet_id = p['fixed_ips'][0]['subnet_id'] if md_proxy and md_proxy.is_md_subnet(subnet_id): continue subnet = self._core_plugin.get_subnet(context, subnet_id) subnets.append({'id': subnet_id, 'cidr': subnet['cidr']}) LOG.debug("Got related subnets %s", subnets) return subnets def _get_bgp_speakers_by_bgp_peer(self, context, bgp_peer_id): fields = ['id', 'peers'] bgp_speakers = self._plugin.get_bgp_speakers(context, fields=fields) bgp_speaker_ids = [bgp_speaker['id'] for bgp_speaker in bgp_speakers if bgp_peer_id in bgp_speaker['peers']] return bgp_speaker_ids def _get_prefixes_and_redistribution_rules(self, subnets, advertise_static_routes): prefixes = [] redis_rules = [] for subnet in subnets: prefix_name = self.prefix_name(subnet['id']) prefix = ip_prefix(prefix_name, subnet['cidr']) prefixes.append(prefix) rule = redistribution_rule(advertise_static_routes, prefix_name) redis_rules.append(rule) return prefixes, redis_rules def create_bgp_speaker(self, context, bgp_speaker): bgp_speaker_data = bgp_speaker['bgp_speaker'] ip_version = bgp_speaker_data.get('ip_version') if ip_version and ip_version == 6: err_msg = _("NSXv BGP does not support for IPv6") raise n_exc.InvalidInput(error_message=err_msg) def update_bgp_speaker(self, context, bgp_speaker_id, bgp_speaker): bgp_obj = bgp_speaker['bgp_speaker'] 
old_speaker_info = self._plugin.get_bgp_speaker(context, bgp_speaker_id) enabled_state = old_speaker_info['advertise_tenant_networks'] new_enabled_state = bgp_obj.get('advertise_tenant_networks', enabled_state) if new_enabled_state == enabled_state: return bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings( context.session, bgp_speaker_id) edge_ids = [bgp_binding['edge_id'] for bgp_binding in bgp_bindings] action = 'Enabling' if new_enabled_state else 'Disabling' LOG.info("%s BGP route redistribution on edges: %s.", action, edge_ids) for edge_id in edge_ids: try: self._nsxv.update_routing_redistribution(edge_id, new_enabled_state) except vcns_exc.VcnsApiException: LOG.warning("Failed to update BGP on edge '%s'.", edge_id) def delete_bgp_speaker(self, context, bgp_speaker_id): bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings( context.session, bgp_speaker_id) self._stop_bgp_on_edges(context, bgp_bindings, bgp_speaker_id) def _validate_bgp_configuration_on_peer_esg(self, bgp_peer): if not bgp_peer.get('esg_id'): return # TBD(roeyc): Validate peer_ip is on subnet bgp_config = self._nsxv.get_routing_bgp_config(bgp_peer['esg_id']) remote_as = bgp_peer['remote_as'] esg_id = bgp_peer['esg_id'] esg_as = bgp_config['bgp'].get('localAS') if not bgp_config['bgp']['enabled']: raise ext_esg_peer.BgpDisabledOnEsgPeer(esg_id=esg_id) if esg_as != int(remote_as): raise ext_esg_peer.EsgRemoteASDoNotMatch(remote_as=remote_as, esg_id=esg_id, esg_as=esg_as) h, resp = self._nsxv.vcns.get_interfaces(esg_id) for iface in resp['vnics']: address_groups = iface['addressGroups']['addressGroups'] matching_iface = [ag for ag in address_groups if ag['primaryAddress'] == bgp_peer['peer_ip']] if matching_iface: break else: raise ext_esg_peer.EsgInternalIfaceDoesNotMatch(esg_id=esg_id) def create_bgp_peer(self, context, bgp_peer): bgp_peer = bgp_peer['bgp_peer'] remote_ip = bgp_peer['peer_ip'] if not netaddr.valid_ipv4(remote_ip): err_msg = _("NSXv BGP does not support for IPv6") raise 
n_exc.InvalidInput(error_message=err_msg) self._validate_bgp_configuration_on_peer_esg(bgp_peer) def update_bgp_peer(self, context, bgp_peer_id, bgp_peer): password = bgp_peer['bgp_peer'].get('password') old_bgp_peer = self._plugin.get_bgp_peer(context, bgp_peer_id) # Only password update is relevant for backend. if old_bgp_peer['password'] == password: return bgp_speaker_ids = self._get_bgp_speakers_by_bgp_peer(context, bgp_peer_id) # Update the password for the old bgp peer and update NSX old_bgp_peer['password'] = password neighbour = bgp_neighbour_from_peer(old_bgp_peer) for bgp_speaker_id in bgp_speaker_ids: with locking.LockManager.get_lock(bgp_speaker_id): peers = self._plugin.get_bgp_peers_by_bgp_speaker( context, bgp_speaker_id) if bgp_peer_id not in [p['id'] for p in peers]: continue bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings( context.session, bgp_speaker_id) for binding in bgp_bindings: try: # Neighbours are identified by their ip address self._nsxv.update_bgp_neighbours(binding['edge_id'], [neighbour], [neighbour]) except vcns_exc.VcnsApiException: LOG.error("Failed to update BGP neighbor '%s' on " "edge '%s'", old_bgp_peer['peer_ip'], binding['edge_id']) def _validate_bgp_peer(self, context, bgp_speaker_id, new_peer_id): new_peer = self._plugin._get_bgp_peer(context, new_peer_id) peers = self._plugin._get_bgp_peers_by_bgp_speaker_binding( context, bgp_speaker_id) self._plugin._validate_peer_ips(bgp_speaker_id, peers, new_peer) def add_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info): bgp_peer_id = self._plugin._get_id_for(bgp_peer_info, 'bgp_peer_id') bgp_peer_obj = self._plugin.get_bgp_peer(context, bgp_peer_id) nbr = bgp_neighbour_from_peer(bgp_peer_obj) bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings(context.session, bgp_speaker_id) self._validate_bgp_peer(context, bgp_speaker_id, bgp_peer_obj['id']) speaker = self._plugin.get_bgp_speaker(context, bgp_speaker_id) # list of tenant edge routers to be removed as bgp-neighbours to 
this # peer if it's associated with specific ESG. neighbours = [] for binding in bgp_bindings: try: self._nsxv.add_bgp_neighbours(binding['edge_id'], [nbr]) except vcns_exc.VcnsApiException: LOG.error("Failed to add BGP neighbour on '%s'", binding['edge_id']) else: gw_nbr = gw_bgp_neighbour(binding['bgp_identifier'], speaker['local_as'], bgp_peer_obj['password']) neighbours.append(gw_nbr) LOG.debug("Succesfully added BGP neighbor '%s' on '%s'", bgp_peer_obj['peer_ip'], binding['edge_id']) if bgp_peer_obj.get('esg_id'): edge_gw = bgp_peer_obj['esg_id'] try: self._nsxv.add_bgp_neighbours(edge_gw, neighbours) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.error("Failed to add BGP neighbour on GW Edge '%s'", edge_gw) def remove_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info): bgp_peer_id = bgp_peer_info['bgp_peer_id'] bgp_peer_obj = self._plugin.get_bgp_peer(context, bgp_peer_id) nbr = bgp_neighbour_from_peer(bgp_peer_obj) bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings( context.session, bgp_speaker_id) speaker = self._plugin.get_bgp_speaker(context, bgp_speaker_id) # list of tenant edge routers to be removed as bgp-neighbours to this # peer if it's associated with specific ESG. 
neighbours = [] for binding in bgp_bindings: try: self._nsxv.remove_bgp_neighbours(binding['edge_id'], [nbr]) except vcns_exc.VcnsApiException: LOG.error("Failed to remove BGP neighbour on '%s'", binding['edge_id']) else: gw_nbr = gw_bgp_neighbour(binding['bgp_identifier'], speaker['local_as'], bgp_peer_obj['password']) neighbours.append(gw_nbr) LOG.debug("Succesfully removed BGP neighbor '%s' on '%s'", bgp_peer_obj['peer_ip'], binding['edge_id']) if bgp_peer_obj.get('esg_id'): edge_gw = bgp_peer_obj['esg_id'] try: self._nsxv.remove_bgp_neighbours(edge_gw, neighbours) except vcns_exc.VcnsApiException: LOG.error("Failed to remove BGP neighbour on GW Edge '%s'", edge_gw) def _validate_gateway_network(self, context, speaker_id, network_id): ext_net = self._core_plugin.get_network(context, network_id) if not ext_net.get(extnet_apidef.EXTERNAL): raise nsx_exc.NsxBgpNetworkNotExternal(net_id=network_id) if not ext_net['subnets']: raise nsx_exc.NsxBgpGatewayNetworkHasNoSubnets(net_id=network_id) # REVISIT(roeyc): Currently not allowing more than one bgp speaker per # gateway network. 
speakers_on_network = self._plugin._bgp_speakers_for_gateway_network( context, network_id) if speakers_on_network: raise bgp_ext.BgpSpeakerNetworkBindingError( network_id=network_id, bgp_speaker_id=speakers_on_network[0]['id']) subnet_id = ext_net['subnets'][0] ext_subnet = self._core_plugin.get_subnet(context, subnet_id) if ext_subnet.get('gateway_ip'): raise ext_esg_peer.ExternalSubnetHasGW( network_id=network_id, subnet_id=subnet_id) if not ext_net[address_scope.IPV4_ADDRESS_SCOPE]: raise nsx_exc.NsxBgpSpeakerUnableToAddGatewayNetwork( network_id=network_id, bgp_speaker_id=speaker_id) return True def add_gateway_network(self, context, bgp_speaker_id, network_info): gateway_network_id = network_info['network_id'] if not self._validate_gateway_network(context, bgp_speaker_id, gateway_network_id): return edge_router_dict = self._get_dynamic_routing_edge_list( context, gateway_network_id, bgp_speaker_id) speaker = self._plugin.get_bgp_speaker(context, bgp_speaker_id) bgp_peers = self._plugin.get_bgp_peers_by_bgp_speaker( context, bgp_speaker_id) local_as = speaker['local_as'] peers = [] for edge_id, edge_router_config in edge_router_dict.items(): router_ids = edge_router_config['no_snat_routers'] advertise_static_routes = ( edge_router_config['advertise_static_routes']) subnets = self._query_tenant_subnets(context, router_ids) # router_id here is in IP address format and is required for # the BGP configuration. 
bgp_identifier = edge_router_config['bgp_identifier'] try: self._start_bgp_on_edge(context, edge_id, speaker, bgp_peers, bgp_identifier, subnets, advertise_static_routes) except vcns_exc.VcnsApiException: LOG.error("Failed to configure BGP speaker %s on edge '%s'.", bgp_speaker_id, edge_id) else: peers.append(bgp_identifier) for edge_gw, password in [(peer['esg_id'], peer['password']) for peer in bgp_peers if peer.get('esg_id')]: neighbours = [gw_bgp_neighbour(bgp_id, local_as, password) for bgp_id in peers] try: self._nsxv.add_bgp_neighbours(edge_gw, neighbours) except vcns_exc.VcnsApiException: LOG.error("Failed to add BGP neighbour on GW Edge '%s'", edge_gw) def _start_bgp_on_edge(self, context, edge_id, speaker, bgp_peers, bgp_identifier, subnets, advertise_static_routes): enabled_state = speaker['advertise_tenant_networks'] local_as = speaker['local_as'] prefixes, redis_rules = self._get_prefixes_and_redistribution_rules( subnets, advertise_static_routes) bgp_neighbours = [bgp_neighbour_from_peer(bgp_peer) for bgp_peer in bgp_peers] try: self._nsxv.add_bgp_speaker_config(edge_id, bgp_identifier, local_as, enabled_state, bgp_neighbours, prefixes, redis_rules) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.error("Failed to configure BGP speaker '%s' on edge '%s'.", speaker['id'], edge_id) else: nsxv_db.add_nsxv_bgp_speaker_binding(context.session, edge_id, speaker['id'], bgp_identifier) def _stop_bgp_on_edges(self, context, bgp_bindings, speaker_id): peers_to_remove = [] speaker = self._plugin.get_bgp_speaker(context, speaker_id) local_as = speaker['local_as'] for bgp_binding in bgp_bindings: edge_id = bgp_binding['edge_id'] try: self._nsxv.delete_bgp_speaker_config(edge_id) except vcns_exc.VcnsApiException: LOG.error("Failed to delete BGP speaker '%s' config on edge " "'%s'.", speaker_id, edge_id) else: nsxv_db.delete_nsxv_bgp_speaker_binding(context.session, edge_id) peers_to_remove.append(bgp_binding['bgp_identifier']) # We 
should also remove all bgp neighbours on gw-edges which # corresponds with tenant routers that are associated with this bgp # speaker. bgp_peers = self._plugin.get_bgp_peers_by_bgp_speaker(context, speaker_id) gw_edges = [(peer['esg_id'], peer['password']) for peer in bgp_peers if peer.get('esg_id')] for gw_edge, password in gw_edges: neighbours_to_remove = [gw_bgp_neighbour(bgp_identifier, local_as, password) for bgp_identifier in peers_to_remove] try: self._nsxv.remove_bgp_neighbours(gw_edge, neighbours_to_remove) except vcns_exc.VcnsApiException: LOG.error("Failed to remove BGP neighbour on GW edge '%s'.", gw_edge) def remove_gateway_network(self, context, bgp_speaker_id, network_info): bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings( context.session, bgp_speaker_id) self._stop_bgp_on_edges(context, bgp_bindings, bgp_speaker_id) def _update_edge_bgp_identifier(self, context, bgp_binding, speaker, new_bgp_identifier): local_as = speaker['local_as'] bgp_peers = self._plugin.get_bgp_peers_by_bgp_speaker(context, speaker['id']) self._nsxv.update_router_id(bgp_binding['edge_id'], new_bgp_identifier) for gw_edge_id, password in [(peer['esg_id'], peer['password']) for peer in bgp_peers if peer.get('esg_id')]: nbr_to_remove = gw_bgp_neighbour(bgp_binding['bgp_identifier'], local_as, password) nbr_to_add = gw_bgp_neighbour(new_bgp_identifier, local_as, password) self._nsxv.update_bgp_neighbours(gw_edge_id, [nbr_to_add], [nbr_to_remove]) with context.session.begin(subtransactions=True): bgp_binding['bgp_identifier'] = new_bgp_identifier def process_router_gw_port_update(self, context, speaker, router, updated_port): router_id = router['id'] gw_fixed_ip = router.gw_port['fixed_ips'][0]['ip_address'] edge_id, advertise_static_routes = ( self._get_router_edge_info(context, router_id)) if not edge_id: # shared router is not attached on any edge return bgp_binding = nsxv_db.get_nsxv_bgp_speaker_binding( context.session, edge_id) if bgp_binding: new_fixed_ip = 
updated_port['fixed_ips'][0]['ip_address'] fixed_ip_updated = gw_fixed_ip != new_fixed_ip subnets = self._query_tenant_subnets(context, [router_id]) prefixes, redis_rules = ( self._get_prefixes_and_redistribution_rules( subnets, advertise_static_routes)) # Handle possible snat/no-nat update if router.enable_snat: self._nsxv.remove_bgp_redistribution_rules(edge_id, prefixes) else: self._nsxv.add_bgp_redistribution_rules(edge_id, prefixes, redis_rules) if bgp_binding['bgp_identifier'] == gw_fixed_ip: if fixed_ip_updated: self._update_edge_bgp_identifier(context, bgp_binding, speaker, new_fixed_ip) def enable_bgp_on_router(self, context, speaker, router_id): local_as = speaker['local_as'] edge_id, advertise_static_routes = ( self._get_router_edge_info(context, router_id)) if not edge_id: # shared router is not attached on any edge return router = self._core_plugin._get_router(context, router_id) subnets = self._query_tenant_subnets(context, [router_id]) bgp_peers = self._plugin.get_bgp_peers_by_bgp_speaker( context, speaker['id']) bgp_binding = nsxv_db.get_nsxv_bgp_speaker_binding( context.session, edge_id) if bgp_binding and subnets: # Edge already configured with BGP (e.g - shared router edge), # Add the router attached subnets. 
if router.enable_snat: prefixes = [self.prefix_name(subnet['id']) for subnet in subnets] self._nsxv.remove_bgp_redistribution_rules(edge_id, prefixes) else: prefixes, redis_rules = ( self._get_prefixes_and_redistribution_rules( subnets, advertise_static_routes)) self._nsxv.add_bgp_redistribution_rules(edge_id, prefixes, redis_rules) elif not bgp_binding: if router.enable_snat: subnets = [] bgp_identifier = router.gw_port['fixed_ips'][0]['ip_address'] self._start_bgp_on_edge(context, edge_id, speaker, bgp_peers, bgp_identifier, subnets, advertise_static_routes) for gw_edge_id, password in [(peer['esg_id'], peer['password']) for peer in bgp_peers if peer.get('esg_id')]: nbr = gw_bgp_neighbour(bgp_identifier, local_as, password) self._nsxv.add_bgp_neighbours(gw_edge_id, [nbr]) def disable_bgp_on_router(self, context, speaker, router_id, gw_ip, edge_id=None): speaker = self._plugin.get_bgp_speaker(context, speaker['id']) current_edge_id, advertise_static_routes = ( self._get_router_edge_info(context, router_id)) edge_id = edge_id or current_edge_id if not edge_id: return bgp_binding = nsxv_db.get_nsxv_bgp_speaker_binding(context.session, edge_id) if not bgp_binding: return # Need to ensure that we do not use the metadata IP's md_proxy = self._get_md_proxy_for_router(context, router_id) routers_ids = ( self._core_plugin.edge_manager.get_routers_on_same_edge( context, router_id)) routers_ids.remove(router_id) # We need to find out what other routers are hosted on the edges and # whether they have a gw addresses that could replace the current # bgp-identifier (if required). 
filters = {'device_owner': [n_const.DEVICE_OWNER_ROUTER_GW], 'device_id': routers_ids} edge_gw_ports = self._core_plugin.get_ports(context, filters=filters) alt_bgp_identifiers = [ p['fixed_ips'][0]['ip_address'] for p in edge_gw_ports if (not md_proxy or not md_proxy.is_md_subnet( p['fixed_ips'][0]['subnet_id']))] if alt_bgp_identifiers: # Shared router, only remove prefixes and redistribution # rules. subnets = self._query_tenant_subnets(context, [router_id]) prefixes = [self.prefix_name(subnet['id']) for subnet in subnets] self._nsxv.remove_bgp_redistribution_rules(edge_id, prefixes) if bgp_binding['bgp_identifier'] == gw_ip: self._update_edge_bgp_identifier(context, bgp_binding, speaker, alt_bgp_identifiers[0]) else: self._stop_bgp_on_edges(context, [bgp_binding], speaker['id']) def advertise_subnet(self, context, speaker_id, router_id, subnet): router = self._core_plugin._get_router(context, router_id) if router.enable_snat: # Do nothing, by default, only when advertisement is needed we add # a new redistribution rule return edge_id, advertise_static_routes = ( self._get_router_edge_info(context, router_id)) if not edge_id: # shared router is not attached on any edge return prefixes, redis_rules = self._get_prefixes_and_redistribution_rules( [subnet], advertise_static_routes) self._nsxv.add_bgp_redistribution_rules(edge_id, prefixes, redis_rules) def withdraw_subnet(self, context, speaker_id, router_id, subnet_id): router = self._core_plugin._get_router(context, router_id) if router.enable_snat: # Do nothing, by default, only when advertisement is needed we add # a new redistribution rule return edge_id, advertise_static_routes = ( self._get_router_edge_info(context, router_id)) prefix_name = self.prefix_name(subnet_id) self._nsxv.remove_bgp_redistribution_rules(edge_id, [prefix_name]) vmware-nsx-12.0.1/vmware_nsx/services/qos/0000775000175100017510000000000013244524600020544 5ustar 
zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/qos/nsx_v3/0000775000175100017510000000000013244524600021764 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/qos/nsx_v3/utils.py0000666000175100017510000001756613244523345023524 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from neutron_lib.api import validators from neutron_lib import constants as n_consts from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import projectpluginmap LOG = logging.getLogger(__name__) MAX_KBPS_MIN_VALUE = 1024 # The max limit is calculated so that the value sent to the backed will # be smaller than 2**31 MAX_BURST_MAX_VALUE = int((2 ** 31 - 1) / 128) class QosNotificationsHandler(object): def __init__(self): super(QosNotificationsHandler, self).__init__() self._core_plugin = None @property def core_plugin(self): if not self._core_plugin: self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): # get the plugin that match this driver self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_T) return self._core_plugin @property def 
_nsxlib_qos(self): return self.core_plugin.nsxlib.qos_switching_profile def _get_tags(self, context, policy): policy_dict = {'id': policy.id, 'tenant_id': policy.tenant_id} return self._nsxlib_qos.build_v3_tags_payload( policy_dict, resource_type='os-neutron-qos-id', project_name=context.tenant_name) def create_policy(self, context, policy): policy_id = policy.id tags = self._get_tags(context, policy) result = self._nsxlib_qos.create( tags=tags, name=policy.name, description=policy.description) if not result or not validators.is_attr_set(result.get('id')): msg = _("Unable to create QoS switching profile on the backend") raise nsx_exc.NsxPluginException(err_msg=msg) profile_id = result['id'] # Add the mapping entry of the policy_id <-> profile_id nsx_db.add_qos_policy_profile_mapping(context.session, policy_id, profile_id) def delete_policy(self, context, policy_id): profile_id = nsx_db.get_switch_profile_by_qos_policy( context.session, policy_id) # delete the profile id from the backend and the DB self._nsxlib_qos.delete(profile_id) nsx_db.delete_qos_policy_profile_mapping( context.session, policy_id) def update_policy(self, context, policy_id, policy): profile_id = nsx_db.get_switch_profile_by_qos_policy( context.session, policy_id) tags = self._get_tags(context, policy) self._nsxlib_qos.update( profile_id, tags=tags, name=policy.name, description=policy.description) def _validate_bw_values(self, bw_rule): """Validate that the configured values are allowed by the NSX backend. Since failing the action from the notification callback is not possible, just log the warning and use the minimal/maximal values. """ # Validate the max bandwidth value minimum value # (max value is above what neutron allows so no need to check it) if (bw_rule.max_kbps < MAX_KBPS_MIN_VALUE): msg = (_("Invalid input for max_kbps. 
" "The minimal legal value is %s") % MAX_KBPS_MIN_VALUE) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) # validate the burst size value max value # (max value is 0, and neutron already validates this) if (bw_rule.max_burst_kbps > MAX_BURST_MAX_VALUE): msg = (_("Invalid input for burst_size. " "The maximal legal value is %s") % MAX_BURST_MAX_VALUE) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _get_bw_values_from_rule(self, bw_rule): """Translate the neutron bandwidth_limit_rule values, into the values expected by the NSX-v3 QoS switch profile, and validate that those are legal """ if bw_rule: shaping_enabled = True # translate kbps -> bytes burst_size = int(bw_rule.max_burst_kbps) * 128 # translate kbps -> Mbps average_bandwidth = int(round(float(bw_rule.max_kbps) / 1024)) # peakBandwidth: a Multiplying on the average BW # because the neutron qos configuration supports # only 1 value peak_bandwidth = int(round(average_bandwidth * cfg.CONF.NSX.qos_peak_bw_multiplier)) else: shaping_enabled = False burst_size = None peak_bandwidth = None average_bandwidth = None return shaping_enabled, burst_size, peak_bandwidth, average_bandwidth def _get_dscp_values_from_rule(self, dscp_rule): """Translate the neutron DSCP marking rule values, into the values expected by the NSX-v3 QoS switch profile """ if dscp_rule: qos_marking = 'untrusted' dscp = dscp_rule.dscp_mark else: qos_marking = 'trusted' dscp = 0 return qos_marking, dscp def update_policy_rules(self, context, policy_id, rules): """Update the QoS switch profile with the BW limitations and DSCP marking configuration """ profile_id = nsx_db.get_switch_profile_by_qos_policy( context.session, policy_id) ingress_bw_rule = None egress_bw_rule = None dscp_rule = None for rule in rules: if rule.rule_type == qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: if rule.direction == n_consts.EGRESS_DIRECTION: egress_bw_rule = rule else: ingress_bw_rule = rule else: dscp_rule = rule # the NSX direction is opposite to 
the neutron direction (ingress_bw_enabled, ingress_burst_size, ingress_peak_bw, ingress_average_bw) = self._get_bw_values_from_rule(egress_bw_rule) (egress_bw_enabled, egress_burst_size, egress_peak_bw, egress_average_bw) = self._get_bw_values_from_rule(ingress_bw_rule) qos_marking, dscp = self._get_dscp_values_from_rule(dscp_rule) self._nsxlib_qos.set_profile_shaping( profile_id, ingress_bw_enabled=ingress_bw_enabled, ingress_burst_size=ingress_burst_size, ingress_peak_bandwidth=ingress_peak_bw, ingress_average_bandwidth=ingress_average_bw, egress_bw_enabled=egress_bw_enabled, egress_burst_size=egress_burst_size, egress_peak_bandwidth=egress_peak_bw, egress_average_bandwidth=egress_average_bw, qos_marking=qos_marking, dscp=dscp) def validate_policy_rule(self, context, policy_id, rule): """Raise an exception if the rule values are not supported""" if rule.rule_type == qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: self._validate_bw_values(rule) vmware-nsx-12.0.1/vmware_nsx/services/qos/nsx_v3/__init__.py0000666000175100017510000000000013244523345024072 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/qos/nsx_v3/message_queue.py0000666000175100017510000000207613244523345025202 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.services.qos.notification_drivers import message_queue class NsxV3QosNotificationDriver( message_queue.RpcQosServiceNotificationDriver): """NSXv3 message queue service notification driver for QoS. Overriding the create_policy method in order to add a notification message in this case too. """ # The message queue is no longer needed in Pike. # Keeping this class for a while for existing configurations. pass vmware-nsx-12.0.1/vmware_nsx/services/qos/nsx_v3/driver.py0000666000175100017510000000574313244523345023651 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import constants from neutron_lib.db import constants as db_constants from neutron_lib.services.qos import base from neutron_lib.services.qos import constants as qos_consts from oslo_log import log as logging from vmware_nsx.services.qos.nsx_v3 import utils as qos_utils LOG = logging.getLogger(__name__) DRIVER = None SUPPORTED_RULES = { qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { qos_consts.MAX_KBPS: { 'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]}, qos_consts.MAX_BURST: { 'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]}, qos_consts.DIRECTION: { 'type:values': [constants.EGRESS_DIRECTION, constants.INGRESS_DIRECTION]} }, qos_consts.RULE_TYPE_DSCP_MARKING: { qos_consts.DSCP_MARK: {'type:values': constants.VALID_DSCP_MARKS} } } class NSXv3QosDriver(base.DriverBase): @staticmethod def create(): return NSXv3QosDriver( name='NSXv3QosDriver', vif_types=None, vnic_types=None, supported_rules=SUPPORTED_RULES, requires_rpc_notifications=False) def __init__(self, **kwargs): self.handler = qos_utils.QosNotificationsHandler() super(NSXv3QosDriver, self).__init__(**kwargs) def is_vif_type_compatible(self, vif_type): return True def is_vnic_compatible(self, vnic_type): return True def create_policy(self, context, policy): self.handler.create_policy(context, policy) def update_policy(self, context, policy): if (hasattr(policy, "rules")): self.handler.update_policy_rules( context, policy.id, policy["rules"]) # May also need to update name / description self.handler.update_policy(context, policy.id, policy) def delete_policy(self, context, policy): self.handler.delete_policy(context, policy.id) def update_policy_precommit(self, context, policy): """Validate rules values, before creation""" if (hasattr(policy, "rules")): for rule in policy["rules"]: self.handler.validate_policy_rule(context, policy.id, rule) def register(): """Register the NSX-V3 QoS driver.""" global DRIVER if not DRIVER: DRIVER = NSXv3QosDriver.create() LOG.debug('NSXv3QosDriver QoS driver 
registered') vmware-nsx-12.0.1/vmware_nsx/services/qos/__init__.py0000666000175100017510000000000013244523345022652 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/qos/common/0000775000175100017510000000000013244524600022034 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/qos/common/utils.py0000666000175100017510000000572313244523345023564 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.objects.qos import policy as qos_policy from neutron_lib.services.qos import constants as qos_consts def update_network_policy_binding(context, net_id, new_policy_id): # detach the old policy (if exists) from the network old_policy = qos_policy.QosPolicy.get_network_policy( context, net_id) if old_policy: if old_policy.id == new_policy_id: return old_policy.detach_network(net_id) # attach the new policy (if exists) to the network if new_policy_id is not None: new_policy = qos_policy.QosPolicy.get_object( context, id=new_policy_id) if new_policy: new_policy.attach_network(net_id) def update_port_policy_binding(context, port_id, new_policy_id): # detach the old policy (if exists) from the port old_policy = qos_policy.QosPolicy.get_port_policy( context, port_id) if old_policy: if old_policy.id == new_policy_id: return old_policy.detach_port(port_id) # attach the new policy (if exists) to the port if new_policy_id is not None: new_policy = qos_policy.QosPolicy.get_object( context, id=new_policy_id) if new_policy: new_policy.attach_port(port_id) def get_port_policy_id(context, port_id): policy = qos_policy.QosPolicy.get_port_policy( context, port_id) if policy: return policy.id def get_network_policy_id(context, net_id): policy = qos_policy.QosPolicy.get_network_policy( context, net_id) if policy: return policy.id def set_qos_policy_on_new_net(context, net_data, created_net): """Update the network with the assigned or default QoS policy Update the network-qos binding table, and the new network structure """ qos_policy_id = net_data.get(qos_consts.QOS_POLICY_ID) if not qos_policy_id: # try and get the default one qos_obj = qos_policy.QosPolicyDefault.get_object( context, project_id=created_net['project_id']) if qos_obj: qos_policy_id = qos_obj.qos_policy_id if qos_policy_id: # attach the policy to the network in the neutron DB update_network_policy_binding( context, net_data['id'], qos_policy_id) created_net[qos_consts.QOS_POLICY_ID] = qos_policy_id return 
qos_policy_id vmware-nsx-12.0.1/vmware_nsx/services/qos/common/__init__.py0000666000175100017510000000000013244523345024142 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/qos/nsx_tvd/0000775000175100017510000000000013244524600022231 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/qos/nsx_tvd/__init__.py0000666000175100017510000000000013244523345024337 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/qos/nsx_tvd/plugin.py0000666000175100017510000000164113244523345024112 0ustar zuulzuul00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.services.qos import qos_plugin from vmware_nsx.plugins.nsx import utils as tvd_utils @tvd_utils.filter_plugins class QoSPlugin(qos_plugin.QoSPlugin): """NSX-TV plugin for QoS. This plugin adds separation between T/V instances """ methods_to_separate = ['get_policies'] vmware-nsx-12.0.1/vmware_nsx/services/qos/nsx_v/0000775000175100017510000000000013244524600021701 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/qos/nsx_v/utils.py0000666000175100017510000000733313244523345023430 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_consts from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from oslo_config import cfg from oslo_log import log as logging LOG = logging.getLogger(__name__) class NsxVQosBWLimits(object): # Data structure to hold the NSX-V representation # of the neutron QoS Bandwidth rule bandwidthEnabled = False averageBandwidth = 0 peakBandwidth = 0 burstSize = 0 class NsxVQosRule(object): def __init__(self, context=None, qos_policy_id=None): super(NsxVQosRule, self).__init__() self._qos_plugin = None # Data structure to hold the NSX-V representation # of the neutron QoS Bandwidth rule for both directions self.egress = NsxVQosBWLimits() self.ingress = NsxVQosBWLimits() # And data for the DSCP marking rule self.dscpMarkEnabled = False self.dscpMarkValue = 0 if qos_policy_id is not None: self._init_from_policy_id(context, qos_policy_id) def _get_qos_plugin(self): if not self._qos_plugin: self._qos_plugin = directory.get_plugin(plugin_const.QOS) return self._qos_plugin # init the nsx_v qos data (outShapingPolicy) from a neutron qos policy def _init_from_policy_id(self, context, qos_policy_id): self.bandwidthEnabled = False self.dscpMarkEnabled = False # read the neutron policy restrictions if qos_policy_id is not None: plugin = self._get_qos_plugin() policy_obj = plugin.get_policy(context, qos_policy_id) if 'rules' in policy_obj and len(policy_obj['rules']) > 0: for rule_obj in policy_obj['rules']: if (rule_obj['type'] == 
qos_consts.RULE_TYPE_BANDWIDTH_LIMIT): # BW limit rule for one of the directions if rule_obj['direction'] == n_consts.EGRESS_DIRECTION: dir_obj = self.egress else: dir_obj = self.ingress dir_obj.bandwidthEnabled = True # averageBandwidth: kbps (neutron) -> bps (nsxv) dir_obj.averageBandwidth = rule_obj['max_kbps'] * 1024 # peakBandwidth: a Multiplying on the average BW # because the neutron qos configuration supports # only 1 value dir_obj.peakBandwidth = int(round( dir_obj.averageBandwidth * cfg.CONF.NSX.qos_peak_bw_multiplier)) # burstSize: kbps (neutron) -> Bytes (nsxv) dir_obj.burstSize = rule_obj['max_burst_kbps'] * 128 if rule_obj['type'] == qos_consts.RULE_TYPE_DSCP_MARKING: # DSCP marking rule self.dscpMarkEnabled = True self.dscpMarkValue = rule_obj['dscp_mark'] return self vmware-nsx-12.0.1/vmware_nsx/services/qos/nsx_v/__init__.py0000666000175100017510000000000013244523345024007 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/qos/nsx_v/driver.py0000666000175100017510000000573113244523345023563 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import constants from neutron_lib.db import constants as db_constants from neutron_lib.services.qos import base from neutron_lib.services.qos import constants as qos_consts from oslo_log import log as logging from vmware_nsx.extensions import projectpluginmap LOG = logging.getLogger(__name__) DRIVER = None SUPPORTED_RULES = { qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { qos_consts.MAX_KBPS: { 'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]}, qos_consts.MAX_BURST: { 'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]}, qos_consts.DIRECTION: { 'type:values': [constants.EGRESS_DIRECTION, constants.INGRESS_DIRECTION]} }, qos_consts.RULE_TYPE_DSCP_MARKING: { qos_consts.DSCP_MARK: {'type:values': constants.VALID_DSCP_MARKS} } } class NSXvQosDriver(base.DriverBase): @staticmethod def create(core_plugin): return NSXvQosDriver( core_plugin, name='NSXvQosDriver', vif_types=None, vnic_types=None, supported_rules=SUPPORTED_RULES, requires_rpc_notifications=False) def __init__(self, core_plugin, **kwargs): super(NSXvQosDriver, self).__init__(**kwargs) self.core_plugin = core_plugin if self.core_plugin.is_tvd_plugin(): # get the plugin that match this driver self.core_plugin = self.core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) self.requires_rpc_notifications = False def is_vif_type_compatible(self, vif_type): return True def is_vnic_compatible(self, vnic_type): return True def create_policy(self, context, policy): pass def update_policy(self, context, policy): # get all the bound networks of this policy networks = policy.get_bound_networks() for net_id in networks: # update the new bw limitations for this network self.core_plugin._update_qos_on_backend_network( context, net_id, policy.id) def delete_policy(self, context, policy): pass def register(core_plugin): """Register the NSX-V QoS driver.""" global DRIVER if not DRIVER: DRIVER = NSXvQosDriver.create(core_plugin) LOG.debug('NSXvQosDriver QoS driver registered') 
vmware-nsx-12.0.1/vmware_nsx/services/qos/nsx_v/plugin.py0000666000175100017510000000253713244523345023567 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.services.qos import qos_plugin from oslo_config import cfg from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc LOG = logging.getLogger(__name__) class NsxVQosPlugin(qos_plugin.QoSPlugin): """Service plugin for VMware NSX-v to implement Neutron's Qos API.""" supported_extension_aliases = ["qos"] def __init__(self): LOG.info("Loading VMware NSX-V Qos Service Plugin") super(NsxVQosPlugin, self).__init__() if not cfg.CONF.nsxv.use_dvs_features: error = _("Cannot use the NSX-V QoS plugin without " "enabling the dvs features") raise nsx_exc.NsxPluginException(err_msg=error) vmware-nsx-12.0.1/vmware_nsx/services/flowclassifier/0000775000175100017510000000000013244524600022756 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/flowclassifier/__init__.py0000666000175100017510000000000013244523345025064 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/flowclassifier/nsx_v/0000775000175100017510000000000013244524600024113 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/flowclassifier/nsx_v/utils.py0000666000175100017510000000500513244523345025634 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. 
# # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.plugins import directory from oslo_log import log as logging LOG = logging.getLogger(__name__) SERVICE_INSERTION_SG_NAME = 'Service Insertion Security Group' SERVICE_INSERTION_RESOURCE = 'Service Insertion' # Using the constant defined here to avoid the need to clone networking-sfc # if the driver is not used. FLOW_CLASSIFIER_EXT = "flow_classifier" class NsxvServiceInsertionHandler(object): def __init__(self, core_plugin): super(NsxvServiceInsertionHandler, self).__init__() self._nsxv = core_plugin.nsx_v self._initialized = False def _initialize_handler(self): if not self._initialized: self._enabled = False self._sg_id = None if self.is_service_insertion_enabled(): self._sg_id = self.get_service_inserion_sg_id() if not self._sg_id: # failed to create the security group or the driver # was not configured LOG.error("Failed to enable service insertion. 
" "Security group not found.") self._enabled = False else: self._enabled = True self._initialized = True def is_service_insertion_enabled(self): # Note - this cannot be called during init, since the manager is busy if (directory.get_plugin(FLOW_CLASSIFIER_EXT)): return True return False def get_service_inserion_sg_id(self): # Note - this cannot be called during init, since the nsxv flow # classifier driver creates this group return self._nsxv.vcns.get_security_group_id( SERVICE_INSERTION_SG_NAME) @property def enabled(self): self._initialize_handler() return self._enabled @property def sg_id(self): self._initialize_handler() return self._sg_id vmware-nsx-12.0.1/vmware_nsx/services/flowclassifier/nsx_v/__init__.py0000666000175100017510000000000013244523345026221 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/services/flowclassifier/nsx_v/driver.py0000666000175100017510000003736513244523345026005 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import xml.etree.ElementTree as et from networking_sfc.extensions import flowclassifier from networking_sfc.services.flowclassifier.common import exceptions as exc from networking_sfc.services.flowclassifier.drivers import base as fc_driver from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context as n_context from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import config # noqa from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.plugins.nsx_v.vshield import vcns as nsxv_api from vmware_nsx.plugins.nsx_v.vshield import vcns_driver from vmware_nsx.services.flowclassifier.nsx_v import utils as fc_utils LOG = logging.getLogger(__name__) REDIRECT_FW_SECTION_NAME = 'OS Flow Classifier Rules' class NsxvFlowClassifierDriver(fc_driver.FlowClassifierDriverBase): """FlowClassifier Driver For NSX-V.""" _redirect_section_id = None def initialize(self): self._nsxv = vcns_driver.VcnsDriver(None) self.init_profile_id() self.init_security_group() self.init_security_group_in_profile() # register an event to the end of the init to handle the first upgrade if self._is_new_security_group: registry.subscribe(self.init_complete, resources.PROCESS, events.BEFORE_SPAWN) def init_profile_id(self): """Init the service insertion profile ID Initialize the profile id that should be assigned to the redirect rules from the nsx configuration and verify that it exists on backend. 
""" if not cfg.CONF.nsxv.service_insertion_profile_id: raise cfg.RequiredOptError("service_insertion_profile_id", group=cfg.OptGroup('nsxv')) self._profile_id = cfg.CONF.nsxv.service_insertion_profile_id # Verify that this moref exists if not self._nsxv.vcns.validate_inventory(self._profile_id): error = (_("Configured service profile ID: %s not found") % self._profile_id) raise nsx_exc.NsxPluginException(err_msg=error) def init_security_group(self): """Init the service insertion security group Look for the service insertion security group in the backend. If it was not found - create it This security group will contain all the VMs vnics that should be inspected by the redirect rules """ # check if this group exist, and create it if not. sg_name = fc_utils.SERVICE_INSERTION_SG_NAME sg_id = self._nsxv.vcns.get_security_group_id(sg_name) self._is_new_security_group = False if not sg_id: description = ("OpenStack Service Insertion Security Group, " "managed by Neutron nsx-v plugin.") sg = {"securitygroup": {"name": sg_name, "description": description}} h, sg_id = ( self._nsxv.vcns.create_security_group(sg)) self._is_new_security_group = True self._security_group_id = sg_id def init_security_group_in_profile(self): """Attach the security group to the service profile """ data = self._nsxv.vcns.get_service_insertion_profile(self._profile_id) if data and len(data) > 1: profile = et.fromstring(data[1]) profile_binding = profile.find('serviceProfileBinding') sec_groups = profile_binding.find('securityGroups') for sec in sec_groups.iter('string'): if sec.text == self._security_group_id: # Already there return # add the security group to the binding et.SubElement(sec_groups, 'string').text = self._security_group_id self._nsxv.vcns.update_service_insertion_profile_binding( self._profile_id, et.tostring(profile_binding, encoding="us-ascii")) def init_complete(self, resource, event, trigger, payload=None): if self._is_new_security_group: # add existing VMs to the new security 
group # This code must run after init is done core_plugin = directory.get_plugin() core_plugin.add_vms_to_service_insertion( self._security_group_id) # Add the first flow classifier entry if cfg.CONF.nsxv.service_insertion_redirect_all: self.add_any_any_redirect_rule() def add_any_any_redirect_rule(self): """Add an any->any flow classifier entry Add 1 flow classifier entry that will redirect all the traffic to the security partner The user will be able to delete/change it later """ context = n_context.get_admin_context() fc_plugin = directory.get_plugin(flowclassifier.FLOW_CLASSIFIER_EXT) # first check that there is no other flow classifier entry defined: fcs = fc_plugin.get_flow_classifiers(context) if len(fcs) > 0: return # Create any->any rule fc = {'name': 'redirect_all', 'description': 'Redirect all traffic', 'tenant_id': nsxv_constants.INTERNAL_TENANT_ID, 'l7_parameters': {}, 'ethertype': 'IPv4', 'protocol': None, 'source_port_range_min': None, 'source_port_range_max': None, 'destination_port_range_min': None, 'destination_port_range_max': None, 'source_ip_prefix': None, 'destination_ip_prefix': None, 'logical_source_port': None, 'logical_destination_port': None } fc_plugin.create_flow_classifier(context, {'flow_classifier': fc}) def get_redirect_fw_section_id(self): if not self._redirect_section_id: # try to find it self._redirect_section_id = self._nsxv.vcns.get_section_id( REDIRECT_FW_SECTION_NAME) if not self._redirect_section_id: # create it for the first time section = et.Element('section') section.attrib['name'] = REDIRECT_FW_SECTION_NAME self._nsxv.vcns.create_redirect_section(et.tostring(section)) self._redirect_section_id = self._nsxv.vcns.get_section_id( REDIRECT_FW_SECTION_NAME) return self._redirect_section_id def get_redirect_fw_section_uri(self): return '%s/%s/%s' % (nsxv_api.FIREWALL_PREFIX, nsxv_api.FIREWALL_REDIRECT_SEC_TYPE, self.get_redirect_fw_section_id()) def get_redirect_fw_section_from_backend(self): section_uri = 
self.get_redirect_fw_section_uri() section_resp = self._nsxv.vcns.get_section(section_uri) if section_resp and len(section_resp) > 1: xml_section = section_resp[1] return et.fromstring(xml_section) def update_redirect_section_in_backed(self, section): section_uri = self.get_redirect_fw_section_uri() self._nsxv.vcns.update_section( section_uri, et.tostring(section, encoding="us-ascii"), None) def _rule_ip_type(self, flow_classifier): if flow_classifier.get('ethertype') == 'IPv6': return 'Ipv6Address' return 'Ipv4Address' def _rule_ports(self, type, flow_classifier): min_port = flow_classifier.get(type + '_port_range_min') max_port = flow_classifier.get(type + '_port_range_max') return self._ports_list(min_port, max_port) def _ports_list(self, min_port, max_port): """Return a string representing the port/range""" if min_port == max_port: return str(min_port) return "%s-%s" % (min_port, max_port) def _rule_name(self, flow_classifier): # The name of the rule will include the name & id of the classifier # so we can later find it in order to update/delete it. 
# Both the flow classifier DB & the backend has max name length of 255 # so we may have to trim the name a bit return (flow_classifier.get('name')[:200] + '-' + flow_classifier.get('id')) def _is_the_same_rule(self, rule, flow_classifier_id): return rule.find('name').text.endswith(flow_classifier_id) def init_redirect_fw_rule(self, redirect_rule, flow_classifier): et.SubElement(redirect_rule, 'name').text = self._rule_name( flow_classifier) et.SubElement(redirect_rule, 'action').text = 'redirect' et.SubElement(redirect_rule, 'direction').text = 'inout' si_profile = et.SubElement(redirect_rule, 'siProfile') et.SubElement(si_profile, 'objectId').text = self._profile_id et.SubElement(redirect_rule, 'packetType').text = flow_classifier.get( 'ethertype').lower() # init the source & destination if flow_classifier.get('source_ip_prefix'): sources = et.SubElement(redirect_rule, 'sources') sources.attrib['excluded'] = 'false' source = et.SubElement(sources, 'source') et.SubElement(source, 'type').text = self._rule_ip_type( flow_classifier) et.SubElement(source, 'value').text = flow_classifier.get( 'source_ip_prefix') if flow_classifier.get('destination_ip_prefix'): destinations = et.SubElement(redirect_rule, 'destinations') destinations.attrib['excluded'] = 'false' destination = et.SubElement(destinations, 'destination') et.SubElement(destination, 'type').text = self._rule_ip_type( flow_classifier) et.SubElement(destination, 'value').text = flow_classifier.get( 'destination_ip_prefix') # init the service if (flow_classifier.get('destination_port_range_min') or flow_classifier.get('source_port_range_min')): services = et.SubElement(redirect_rule, 'services') service = et.SubElement(services, 'service') et.SubElement(service, 'isValid').text = 'true' if flow_classifier.get('source_port_range_min'): source_port = et.SubElement(service, 'sourcePort') source_port.text = self._rule_ports('source', flow_classifier) if flow_classifier.get('destination_port_range_min'): dest_port = 
et.SubElement(service, 'destinationPort') dest_port.text = self._rule_ports('destination', flow_classifier) prot = et.SubElement(service, 'protocolName') prot.text = flow_classifier.get('protocol').upper() # Add the classifier description if flow_classifier.get('description'): notes = et.SubElement(redirect_rule, 'notes') notes.text = flow_classifier.get('description') def _loc_fw_section(self): return locking.LockManager.get_lock('redirect-fw-section') @log_helpers.log_method_call def create_flow_classifier(self, context): """Create a redirect rule at the backend """ flow_classifier = context.current with self._loc_fw_section(): section = self.get_redirect_fw_section_from_backend() new_rule = et.SubElement(section, 'rule') self.init_redirect_fw_rule(new_rule, flow_classifier) self.update_redirect_section_in_backed(section) @log_helpers.log_method_call def update_flow_classifier(self, context): """Update the backend redirect rule """ flow_classifier = context.current with self._loc_fw_section(): section = self.get_redirect_fw_section_from_backend() redirect_rule = None for rule in section.iter('rule'): if self._is_the_same_rule(rule, flow_classifier['id']): redirect_rule = rule break if redirect_rule is None: msg = _("Failed to find redirect rule %s " "on backed") % flow_classifier['id'] raise exc.FlowClassifierException(message=msg) else: # The flowclassifier plugin currently supports updating only # name or description name = redirect_rule.find('name') name.text = self._rule_name(flow_classifier) notes = redirect_rule.find('notes') notes.text = flow_classifier.get('description') or '' self.update_redirect_section_in_backed(section) @log_helpers.log_method_call def delete_flow_classifier(self, context): """Delete the backend redirect rule """ flow_classifier_id = context.current['id'] with self._loc_fw_section(): section = self.get_redirect_fw_section_from_backend() redirect_rule = None for rule in section.iter('rule'): if self._is_the_same_rule(rule, 
flow_classifier_id): redirect_rule = rule section.remove(redirect_rule) break if redirect_rule is None: LOG.error("Failed to delete redirect rule %s: " "Could not find rule on backed", flow_classifier_id) # should not fail the deletion else: self.update_redirect_section_in_backed(section) @log_helpers.log_method_call def create_flow_classifier_precommit(self, context): """Validate the flow classifier data before committing the transaction The NSX-v redirect rules does not support: - logical ports - l7 parameters - source ports range / destination port range with more than 15 ports """ flow_classifier = context.current # Logical source port logical_source_port = flow_classifier['logical_source_port'] if logical_source_port is not None: msg = _('The NSXv driver does not support setting ' 'logical source port in FlowClassifier') raise exc.FlowClassifierBadRequest(message=msg) # Logical destination port logical_destination_port = flow_classifier['logical_destination_port'] if logical_destination_port is not None: msg = _('The NSXv driver does not support setting ' 'logical destination port in FlowClassifier') raise exc.FlowClassifierBadRequest(message=msg) # L7 parameters l7_params = flow_classifier['l7_parameters'] if l7_params is not None and len(l7_params.keys()) > 0: msg = _('The NSXv driver does not support setting ' 'L7 parameters in FlowClassifier') raise exc.FlowClassifierBadRequest(message=msg) vmware-nsx-12.0.1/vmware_nsx/tests/0000775000175100017510000000000013244524600017261 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/__init__.py0000666000175100017510000000000013244523345021367 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/functional/0000775000175100017510000000000013244524600021423 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/functional/requirements.txt0000666000175100017510000000045213244523345024717 0ustar zuulzuul00000000000000# Additional requirements for functional tests # The order of packages 
is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. psycopg2 PyMySQL>=0.6.2 # MIT License vmware-nsx-12.0.1/vmware_nsx/tests/functional/__init__.py0000666000175100017510000000000013244523345023531 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/0000775000175100017510000000000013244524600020240 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/osc/0000775000175100017510000000000013244524600021024 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/osc/v2/0000775000175100017510000000000013244524600021353 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/osc/v2/test_port.py0000666000175100017510000002641013244523345023762 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re import mock from openstackclient.tests.unit.network.v2 import fakes as network_fakes from openstackclient.tests.unit.network.v2 import test_port from openstackclient.tests.unit import utils as tests_utils from vmware_nsx.osc.v2 import port supported_extensions = ('vnic-index', 'provider-security-group', 'mac-learning') class TestCreatePort(test_port.TestCreatePort): def setUp(self): super(TestCreatePort, self).setUp() # Get the command object to test self.cmd = port.NsxCreatePort(self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_create_with_arg_and_val(self, arg_name, arg_val, is_valid=True): self.network.create_port.reset_mock() # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ self._port.name, '--network', self._port.network_id, conv_name, str(arg_val) ] verifylist = [ ('name', self._port.name), ('network', self._port.network_id,), (arg_name, arg_val), ('enable', True), ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = (self.cmd.take_action(parsed_args)) self.network.create_port.assert_called_once_with(**{ 'admin_state_up': True, 'network_id': self._port.network_id, 'name': self._port.name, arg_name: arg_val, }) ref_columns, ref_data = self._get_common_cols_data(self._port) self.assertEqual(ref_columns, columns) self.assertEqual(ref_data, data) def _test_create_with_vnix_index(self, val, is_valid=True): self._test_create_with_arg_and_val('vnic_index', val, is_valid) def test_create_with_vnic_index(self): self._test_create_with_vnix_index(1) def test_create_with_illegal_vnic_index(self): self._test_create_with_vnix_index('illegal', is_valid=False) def test_create_with_provider_security_group(self): # create a port with 
1 provider security group secgroup = network_fakes.FakeSecurityGroup.create_one_security_group() self.network.find_security_group = mock.Mock(return_value=secgroup) arglist = [ '--network', self._port.network_id, '--provider-security-group', secgroup.id, 'test-port', ] verifylist = [ ('network', self._port.network_id,), ('enable', True), ('provider_security_groups', [secgroup.id]), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = (self.cmd.take_action(parsed_args)) self.network.create_port.assert_called_once_with(**{ 'admin_state_up': True, 'network_id': self._port.network_id, 'provider_security_groups': [secgroup.id], 'name': 'test-port', }) ref_columns, ref_data = self._get_common_cols_data(self._port) self.assertEqual(ref_columns, columns) self.assertEqual(ref_data, data) def test_create_with_provider_security_groups(self): # create a port with few provider security groups sg_1 = network_fakes.FakeSecurityGroup.create_one_security_group() sg_2 = network_fakes.FakeSecurityGroup.create_one_security_group() self.network.find_security_group = mock.Mock(side_effect=[sg_1, sg_2]) arglist = [ '--network', self._port.network_id, '--provider-security-group', sg_1.id, '--provider-security-group', sg_2.id, 'test-port', ] verifylist = [ ('network', self._port.network_id,), ('enable', True), ('provider_security_groups', [sg_1.id, sg_2.id]), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = (self.cmd.take_action(parsed_args)) self.network.create_port.assert_called_once_with(**{ 'admin_state_up': True, 'network_id': self._port.network_id, 'provider_security_groups': [sg_1.id, sg_2.id], 'name': 'test-port', }) ref_columns, ref_data = self._get_common_cols_data(self._port) self.assertEqual(ref_columns, columns) self.assertEqual(ref_data, data) def test_create_with_provider_security_group_by_name(self): # create a port with 1 provider security group secgroup = 
network_fakes.FakeSecurityGroup.create_one_security_group() self.network.find_security_group = mock.Mock(return_value=secgroup) arglist = [ '--network', self._port.network_id, '--provider-security-group', secgroup.name, 'test-port', ] verifylist = [ ('network', self._port.network_id,), ('enable', True), ('provider_security_groups', [secgroup.name]), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = (self.cmd.take_action(parsed_args)) self.network.create_port.assert_called_once_with(**{ 'admin_state_up': True, 'network_id': self._port.network_id, 'provider_security_groups': [secgroup.id], 'name': 'test-port', }) ref_columns, ref_data = self._get_common_cols_data(self._port) self.assertEqual(ref_columns, columns) self.assertEqual(ref_data, data) def _test_create_with_flag_arg( self, arg_name, validate_name, validate_val): self.network.create_port.reset_mock() # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ self._port.name, '--network', self._port.network_id, conv_name ] verifylist = [ ('name', self._port.name), ('network', self._port.network_id,), (arg_name, True), ('enable', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = (self.cmd.take_action(parsed_args)) self.network.create_port.assert_called_once_with(**{ 'admin_state_up': True, 'network_id': self._port.network_id, 'name': self._port.name, validate_name: validate_val, }) ref_columns, ref_data = self._get_common_cols_data(self._port) self.assertEqual(ref_columns, columns) self.assertEqual(ref_data, data) def test_create_with_mac_learning(self): self._test_create_with_flag_arg( 'enable_mac_learning', 'mac_learning_enabled', True) def test_create_with_no_mac_learning(self): self._test_create_with_flag_arg( 'disable_mac_learning', 'mac_learning_enabled', False) class TestSetPort(test_port.TestSetPort): def setUp(self): super(TestSetPort, self).setUp() # Get the 
command object to test self.cmd = port.NsxSetPort(self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_set_with_arg_and_val(self, arg_name, arg_val, is_valid=True): self.network.update_port.reset_mock() # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ self._port.name, conv_name, str(arg_val) ] verifylist = [ ('port', self._port.name), (arg_name, arg_val) ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = {arg_name: arg_val} self.network.update_port.assert_called_once_with( self._port, **attrs) self.assertIsNone(result) def _test_set_Vnic_index(self, val, is_valid=True): self._test_set_with_arg_and_val('vnic_index', val, is_valid) def test_set_vnic_index(self): self._test_set_Vnic_index(1) def test_set_illegal_vnic_index(self): # check illegal index self._test_set_Vnic_index('illegal', is_valid=False) def test_set_provider_security_group(self): # It is not allowed to change the provider security groups sg = network_fakes.FakeSecurityGroup.create_one_security_group() self.network.find_security_group = mock.Mock(return_value=sg) arglist = [ '--provider-security-group', sg.id, self._port.name, ] verifylist = [ ('provider_security_groups', [sg.id]), ('port', self._port.name), ] self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) def _test_set_with_flag_arg(self, arg_name, validate_name, validate_val, is_valid=True): self.network.update_port.reset_mock() # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ self._port.name, conv_name ] verifylist = [ ('port', self._port.name), (arg_name, True) ] if not 
is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = {validate_name: validate_val} self.network.update_port.assert_called_once_with( self._port, **attrs) self.assertIsNone(result) def test_set_with_mac_learning(self): self._test_set_with_flag_arg( 'enable_mac_learning', 'mac_learning_enabled', True) def test_set_with_no_mac_learning(self): self._test_set_with_flag_arg( 'disable_mac_learning', 'mac_learning_enabled', False) vmware-nsx-12.0.1/vmware_nsx/tests/unit/osc/v2/test_router.py0000666000175100017510000001241613244523345024317 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re import mock from openstackclient.tests.unit.network.v2 import test_router from openstackclient.tests.unit import utils as tests_utils from vmware_nsx.extensions import routersize from vmware_nsx.extensions import routertype from vmware_nsx.osc.v2 import router supported_extensions = ('nsxv-router-size', 'nsxv-router-type') class TestCreateRouter(test_router.TestCreateRouter): def setUp(self): super(TestCreateRouter, self).setUp() # Get the command object to test self.cmd = router.NsxCreateRouter(self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_create_with_arg_and_val(self, arg_name, arg_val, is_valid=True): self.network.create_router.reset_mock() # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ self.new_router.name, conv_name, arg_val ] verifylist = [ ('name', self.new_router.name), (arg_name, arg_val), ('enable', True), ('distributed', False), ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = (self.cmd.take_action(parsed_args)) self.network.create_router.assert_called_once_with(**{ 'admin_state_up': True, 'name': self.new_router.name, arg_name: arg_val, }) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def _test_create_with_size(self, size, is_valid=True): self._test_create_with_arg_and_val('router_size', size, is_valid) def test_create_with_sizes(self): # check all router types for rtr_size in routersize.VALID_EDGE_SIZES: self._test_create_with_size(rtr_size) def test_create_with_illegal_size(self): self._test_create_with_size('illegal', is_valid=False) def _test_create_with_type(self, rtr_type, is_valid=True): self._test_create_with_arg_and_val('router_type', rtr_type, is_valid) def 
test_create_with_types(self): # check all router types for rtr_type in routertype.VALID_TYPES: self._test_create_with_type(rtr_type) def test_create_with_illegal_type(self): self._test_create_with_type('illegal', is_valid=False) class TestSetRouter(test_router.TestSetRouter): def setUp(self): super(TestSetRouter, self).setUp() # Get the command object to test self.cmd = router.NsxSetRouter(self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_set_with_arg_and_val(self, arg_name, arg_val, is_valid=True): self.network.update_router.reset_mock() # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ self._router.name, conv_name, arg_val ] verifylist = [ ('router', self._router.name), (arg_name, arg_val) ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = {arg_name: arg_val} self.network.update_router.assert_called_once_with( self._router, **attrs) self.assertIsNone(result) def _test_set_size(self, size, is_valid=True): self._test_set_with_arg_and_val('router_size', size, is_valid) def test_set_sizes(self): # check all router types for rtr_size in routersize.VALID_EDGE_SIZES: self._test_set_size(rtr_size) def test_set_illegal_size(self): # check illegal size self._test_set_size('illegal', is_valid=False) def _test_set_type(self, rtr_type, is_valid=True): self._test_set_with_arg_and_val('router_type', rtr_type, is_valid) def test_set_types(self): # check all router types for rtr_type in routertype.VALID_TYPES: self._test_set_type(rtr_type) def test_set_illegal_type(self): self._test_set_type('illegal', is_valid=False) 
vmware-nsx-12.0.1/vmware_nsx/tests/unit/osc/v2/test_subnet.py0000666000175100017510000001227113244523345024276 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import mock from openstackclient.tests.unit.network.v2 import test_subnet from openstackclient.tests.unit import utils as tests_utils from vmware_nsx.osc.v2 import subnet supported_extensions = ('dhcp-mtu', 'dns-search-domain') class TestCreateSubnet(test_subnet.TestCreateSubnet): def setUp(self): super(TestCreateSubnet, self).setUp() # Get the command object to test self.cmd = subnet.NsxCreateSubnet(self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_create_with_arg_and_val(self, arg_name, arg_val, is_valid=True): self.network.create_subnet = mock.Mock(return_value=self._subnet) # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ "--subnet-range", self._subnet.cidr, "--network", self._subnet.network_id, conv_name, str(arg_val), self._subnet.name ] verifylist = [ ('name', self._subnet.name), ('subnet_range', self._subnet.cidr), ('network', self._subnet.network_id), ('ip_version', self._subnet.ip_version), ('gateway', 'auto'), (arg_name, arg_val), ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, 
verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.network.create_subnet.assert_called_once_with(**{ 'cidr': mock.ANY, 'ip_version': mock.ANY, 'network_id': mock.ANY, 'name': self._subnet.name, arg_name: arg_val, }) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def _test_create_with_tag(self, add_tags=True): self.skipTest('Unblock gate') def _test_create_with_mtu(self, mtu, is_valid=True): self._test_create_with_arg_and_val('dhcp_mtu', mtu, is_valid) def test_create_with_mtu(self): # check a valid value self._test_create_with_mtu(1500) def test_create_with_illegal_mtu(self): self._test_create_with_mtu('illegal', is_valid=False) def _test_create_with_search_domain(self, val, is_valid=True): self._test_create_with_arg_and_val('dns_search_domain', val, is_valid) def test_create_with_search_domain(self): # check a valid value self._test_create_with_search_domain('www.aaa.com') # Cannot check illegal search domain - validation is on the server side class TestSetSubnet(test_subnet.TestSetSubnet): def setUp(self): super(TestSetSubnet, self).setUp() # Get the command object to test self.cmd = subnet.NsxSetSubnet(self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_set_with_arg_and_val(self, arg_name, arg_val, is_valid=True): # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ conv_name, str(arg_val), self._subnet.name, ] verifylist = [ (arg_name, arg_val), ('subnet', self._subnet.name), ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = { arg_name: arg_val } 
self.network.update_subnet.assert_called_with(self._subnet, **attrs) self.assertIsNone(result) def _test_set_mtu(self, mtu, is_valid=True): self._test_set_with_arg_and_val('dhcp_mtu', mtu, is_valid) def test_set_mtu(self): # check a valid value self._test_set_mtu(1500) def test_set_illegal_mtu(self): self._test_set_mtu('illegal', is_valid=False) def _test_set_with_search_domain(self, val, is_valid=True): self._test_set_with_arg_and_val('dns_search_domain', val, is_valid) def test_set_with_search_domain(self): # check a valid value self._test_set_with_search_domain('www.aaa.com') vmware-nsx-12.0.1/vmware_nsx/tests/unit/osc/v2/__init__.py0000666000175100017510000000000013244523345023461 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/osc/v2/test_security_group.py0000666000175100017510000001533613244523345026066 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re import mock from openstackclient.tests.unit.network.v2 import ( test_security_group_network as test_security_group) from openstackclient.tests.unit import utils as tests_utils from vmware_nsx.osc.v2 import security_group supported_extensions = ('security-group-logging', 'provider-security-group', 'security-group-policy') class TestCreateSecurityGroup( test_security_group.TestCreateSecurityGroupNetwork): def setUp(self): super(TestCreateSecurityGroup, self).setUp() # Get the command object to test self.cmd = security_group.NsxCreateSecurityGroup( self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_create_with_flag_arg( self, arg_name, validate_name, validate_val): self.network.create_security_group = mock.Mock( return_value=self._security_group) # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ '--description', self._security_group.description, conv_name, self._security_group.name ] verifylist = [ ('description', self._security_group.description), ('name', self._security_group.name), (arg_name, True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.network.create_security_group.assert_called_once_with(**{ 'description': self._security_group.description, 'name': self._security_group.name, validate_name: validate_val, }) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_with_logging(self): self._test_create_with_flag_arg('logging', 'logging', True) def test_create_with_no_logging(self): self._test_create_with_flag_arg('no_logging', 'logging', False) def test_create_with_provider(self): self._test_create_with_flag_arg('provider', 'provider', True) def _test_create_with_arg_val(self, arg_name, arg_val): self.network.create_security_group = mock.Mock( 
return_value=self._security_group) # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ '--description', self._security_group.description, conv_name, str(arg_val), self._security_group.name ] verifylist = [ ('description', self._security_group.description), ('name', self._security_group.name), (arg_name, arg_val), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.network.create_security_group.assert_called_once_with(**{ 'description': self._security_group.description, 'name': self._security_group.name, arg_name: arg_val, }) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_with_policy(self): self._test_create_with_arg_val('policy', 'policy-1') class TestSetSecurityGroup( test_security_group.TestSetSecurityGroupNetwork): def setUp(self): super(TestSetSecurityGroup, self).setUp() # Get the command object to test self.cmd = security_group.NsxSetSecurityGroup( self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_set_with_flag_arg(self, arg_name, validate_name, validate_val, is_valid=True): self.network.create_security_group = mock.Mock( return_value=self._security_group) # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ conv_name, self._security_group.name ] verifylist = [ (arg_name, True), ('group', self._security_group.name), ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.network.update_security_group.assert_called_once_with( self._security_group, **{validate_name: validate_val}) self.assertIsNone(result) def test_set_with_logging(self): 
self._test_set_with_flag_arg('logging', 'logging', True) def test_set_with_no_logging(self): self._test_set_with_flag_arg('no_logging', 'logging', False) def test_set_with_provider(self): # modifying the provider flag should fail self._test_set_with_flag_arg('provider', 'provider', True, is_valid=False) def _test_set_with_arg_val(self, arg_name, arg_val): self.network.create_security_group = mock.Mock( return_value=self._security_group) # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ conv_name, str(arg_val), self._security_group.name ] verifylist = [ (arg_name, arg_val), ('group', self._security_group.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.network.update_security_group.assert_called_once_with( self._security_group, **{arg_name: arg_val}) self.assertIsNone(result) def test_set_with_policyr(self): self._test_set_with_arg_val('policy', 'policy-1') vmware-nsx-12.0.1/vmware_nsx/tests/unit/osc/__init__.py0000666000175100017510000000000013244523345023132 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/0000775000175100017510000000000013244524600022063 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/vpnaas/0000775000175100017510000000000013244524600023353 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/vpnaas/__init__.py0000666000175100017510000000000013244523345025461 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/vpnaas/test_nsxv3_vpnaas.py0000666000175100017510000004251313244523345027431 0ustar zuulzuul00000000000000# Copyright 2013, Nachi Ueno, NTT I3, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.db.models import l3 as l3_models from neutron_lib import context as n_ctx from neutron_vpnaas.tests import base from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.services.vpnaas.nsxv3 import ipsec_validator class TestDriverValidation(base.BaseTestCase): def setUp(self): super(TestDriverValidation, self).setUp() self.context = n_ctx.Context('some_user', 'some_tenant') self.service_plugin = mock.Mock() driver = mock.Mock() driver.service_plugin = self.service_plugin with mock.patch("neutron_lib.plugins.directory.get_plugin"): self.validator = ipsec_validator.IPsecV3Validator(driver) self.validator._l3_plugin = mock.Mock() self.validator._core_plugin = mock.Mock() self.vpn_service = {'router_id': 'dummy_router', 'subnet_id': 'dummy_subnet'} self.peer_address = '10.10.10.10' self.peer_cidr = '10.10.11.0/20' def _test_lifetime_not_in_seconds(self, validation_func): policy_info = {'lifetime': {'units': 'kilobytes', 'value': 1000}} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, policy_info) def test_ike_lifetime_not_in_seconds(self): self._test_lifetime_not_in_seconds( self.validator.validate_ike_policy) def test_ipsec_lifetime_not_in_seconds(self): self._test_lifetime_not_in_seconds( self.validator.validate_ipsec_policy) def _test_lifetime_seconds_values_at_limits(self, validation_func): policy_info = {'lifetime': {'units': 'seconds', 'value': 21600}} validation_func(self.context, policy_info) policy_info = {'lifetime': {'units': 'seconds', 'value': 86400}} validation_func(self.context, 
policy_info) policy_info = {'lifetime': {'units': 'seconds', 'value': 10}} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, policy_info) def test_ike_lifetime_seconds_values_at_limits(self): self._test_lifetime_seconds_values_at_limits( self.validator.validate_ike_policy) def test_ipsec_lifetime_seconds_values_at_limits(self): self._test_lifetime_seconds_values_at_limits( self.validator.validate_ipsec_policy) def _test_auth_algorithm(self, validation_func): auth_algorithm = {'auth_algorithm': 'sha384'} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, auth_algorithm) auth_algorithm = {'auth_algorithm': 'sha512'} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, auth_algorithm) auth_algorithm = {'auth_algorithm': 'sha1'} validation_func(self.context, auth_algorithm) auth_algorithm = {'auth_algorithm': 'sha256'} validation_func(self.context, auth_algorithm) def test_ipsec_auth_algorithm(self): self._test_auth_algorithm(self.validator.validate_ipsec_policy) def test_ike_auth_algorithm(self): self._test_auth_algorithm(self.validator.validate_ike_policy) def _test_encryption_algorithm(self, validation_func): auth_algorithm = {'encryption_algorithm': 'aes-192'} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, auth_algorithm) auth_algorithm = {'encryption_algorithm': 'aes-128'} validation_func(self.context, auth_algorithm) auth_algorithm = {'encryption_algorithm': 'aes-256'} validation_func(self.context, auth_algorithm) def test_ipsec_encryption_algorithm(self): self._test_encryption_algorithm(self.validator.validate_ipsec_policy) def test_ike_encryption_algorithm(self): self._test_encryption_algorithm(self.validator.validate_ike_policy) def test_ike_negotiation_mode(self): policy_info = {'phase1-negotiation-mode': 'aggressive'} self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_ike_policy, self.context, policy_info) policy_info = 
{'phase1-negotiation-mode': 'main'} self.validator.validate_ike_policy(self.context, policy_info) def _test_pfs(self, validation_func): policy_info = {'pfs': 'group15'} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, policy_info) policy_info = {'pfs': 'group14'} validation_func(self.context, policy_info) def test_ipsec_pfs(self): self._test_pfs(self.validator.validate_ipsec_policy) def test_ike_pfs(self): self._test_pfs(self.validator.validate_ike_policy) def test_ipsec_encap_mode(self): policy_info = {'encapsulation_mode': 'transport'} self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_ipsec_policy, self.context, policy_info) policy_info = {'encapsulation_mode': 'tunnel'} self.validator.validate_ipsec_policy(self.context, policy_info) def test_ipsec_transform_protocol(self): policy_info = {'transform_protocol': 'ah'} self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_ipsec_policy, self.context, policy_info) policy_info = {'transform_protocol': 'esp'} self.validator.validate_ipsec_policy(self.context, policy_info) def test_vpn_service_validation_router(self): db_router = l3_models.Router() nsx_router = {'high_availability_mode': 'ACITVE_ACTIVE'} db_router.enable_snat = False with mock.patch.object(self.validator.nsxlib.logical_router, 'get', return_value=nsx_router): self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_vpnservice, self.context, self.vpn_service) nsx_router = {'high_availability_mode': 'ACTIVE_STANDBY'} db_router.enable_snat = True with mock.patch.object(self.validator.nsxlib.logical_router, 'get', return_value=nsx_router),\ mock.patch.object(self.validator._core_plugin, '_get_router', return_value=db_router): self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_vpnservice, self.context, self.vpn_service) nsx_router = {'high_availability_mode': 'ACTIVE_STANDBY'} db_router.enable_snat = False with 
mock.patch.object(self.validator.nsxlib.logical_router, 'get', return_value=nsx_router),\ mock.patch.object(self.validator._core_plugin, '_get_router', return_value=db_router): self.validator.validate_vpnservice(self.context, self.vpn_service) def _test_conn_validation(self, conn_params=None, success=True, connections=None, service_subnets=None, router_subnets=None): if connections is None: connections = [] if router_subnets is None: router_subnets = [] def mock_get_routers(context, filters=None, fields=None): return [{'id': 'no-snat', 'external_gateway_info': {'enable_snat': False}}] def mock_get_service(context, service_id): if service_subnets: # option to give the test a different subnet per service subnet_cidr = service_subnets[int(service_id) - 1] else: subnet_cidr = '5.5.5.0/2%s' % service_id return {'id': service_id, 'router_id': service_id, 'subnet_id': 'dummy_subnet', 'external_v4_ip': '1.1.1.%s' % service_id, 'subnet': {'id': 'dummy_subnet', 'cidr': subnet_cidr}} def mock_get_connections(context, filters=None, fields=None): if filters and 'peer_address' in filters: return [conn for conn in connections if conn['peer_address'] == filters['peer_address'][0]] else: return connections with mock.patch.object(self.validator.vpn_plugin, '_get_vpnservice', side_effect=mock_get_service),\ mock.patch.object(self.validator._core_plugin, 'get_routers', side_effect=mock_get_routers),\ mock.patch.object(self.validator._core_plugin, '_find_router_subnets_cidrs', return_value=router_subnets),\ mock.patch.object(self.validator.vpn_plugin, 'get_ipsec_site_connections', side_effect=mock_get_connections): ipsec_sitecon = {'id': '1', 'vpnservice_id': '1', 'mtu': 1500, 'peer_address': self.peer_address, 'peer_cidrs': [self.peer_cidr]} if conn_params: ipsec_sitecon.update(conn_params) if success: self.validator.validate_ipsec_site_connection( self.context, ipsec_sitecon) else: self.assertRaises( nsx_exc.NsxVpnValidationError, self.validator.validate_ipsec_site_connection, 
self.context, ipsec_sitecon) def test_dpd_validation(self): params = {'dpd': {'action': 'hold', 'timeout': 120}} self._test_conn_validation(conn_params=params, success=True) params = {'dpd': {'action': 'clear', 'timeout': 120}} self._test_conn_validation(conn_params=params, success=False) params = {'dpd': {'action': 'hold', 'timeout': 2}} self._test_conn_validation(conn_params=params, success=False) def test_check_unique_addresses(self): # this test runs with non-overlapping local subnets on # different routers subnets = ['5.5.5.0/20', '6.6.6.0/20'] # same service/router gw & peer address - should fail connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '1', 'peer_address': self.peer_address, 'peer_cidrs': [self.peer_cidr]}] self._test_conn_validation(success=False, connections=connections, service_subnets=subnets) # different service/router gw - ok connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=True, connections=connections, service_subnets=subnets) # different peer address - ok connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '1', 'peer_address': '7.7.7.1', 'peer_cidrs': ['7.7.7.7']}] self._test_conn_validation(success=True, connections=connections, service_subnets=subnets) # ignoring non-active connections connections = [{'id': '2', 'status': 'ERROR', 'vpnservice_id': '1', 'peer_address': self.peer_address, 'peer_cidrs': [self.peer_cidr]}] self._test_conn_validation(success=True, connections=connections, service_subnets=subnets) def test_overlapping_rules(self): # peer-cidr overlapping with new one, same subnet - should fail connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '1', 'peer_address': '9.9.9.9', 'peer_cidrs': ['10.10.11.1/19']}] self._test_conn_validation(success=False, connections=connections) # same peer-cidr, overlapping subnets - should fail connections = [{'id': '2', 'status': 'ACTIVE', 
'vpnservice_id': '2', 'peer_address': '9.9.9.9', 'peer_cidrs': [self.peer_cidr]}] self._test_conn_validation(success=False, connections=connections) # non overlapping peer-cidr, same subnet - ok connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '1', 'peer_address': '7.7.7.1', 'peer_cidrs': ['7.7.7.7']}] self._test_conn_validation(success=True, connections=connections) # ignoring non-active connections connections = [{'id': '2', 'status': 'ERROR', 'vpnservice_id': '1', 'peer_address': '9.9.9.9', 'peer_cidrs': ['10.10.11.1/19']}] self._test_conn_validation(success=True, connections=connections) def test_advertisment(self): # different routers, same subnet - should fail subnets = ['5.5.5.0/20', '5.5.5.0/20'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=False, connections=connections, service_subnets=subnets) # different routers, overlapping subnet - should fail subnets = ['5.5.5.0/20', '5.5.5.0/21'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=False, connections=connections, service_subnets=subnets) # different routers, non overlapping subnet - ok subnets = ['5.5.5.0/20', '50.5.5.0/21'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=True, connections=connections, service_subnets=subnets) # no-snat router with overlapping subnet to the service subnet - fail subnets = ['5.5.5.0/21', '1.1.1.0/20'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=False, connections=connections, router_subnets=subnets) # no-snat router with non overlapping subnet to the service subnet - ok 
service_subnets = ['5.5.5.0/20', '6.6.6.0/20'] router_subnets = ['50.5.5.0/21', '1.1.1.0/20'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=True, connections=connections, service_subnets=service_subnets, router_subnets=router_subnets) # TODO(asarfaty): add tests for the driver vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/vpnaas/test_nsxv_vpnaas.py0000666000175100017510000004132613244523345027347 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import mock from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib import context from neutron_lib.plugins import directory from neutron_vpnaas.db.vpn import vpn_models # noqa from neutron_vpnaas.extensions import vpnaas from oslo_utils import uuidutils from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vcns_exc from vmware_nsx.services.vpnaas.nsxv import ipsec_driver from vmware_nsx.tests.unit.nsx_v import test_plugin _uuid = uuidutils.generate_uuid DRIVER_PATH = "vmware_nsx.services.vpnaas.nsxv.ipsec_driver.NSXvIPsecVpnDriver" VALI_PATH = "vmware_nsx.services.vpnaas.nsxv.ipsec_validator.IPsecValidator" FAKE_ROUTER_ID = "aaaaaa-bbbbb-ccc" FAKE_VPNSERVICE_ID = _uuid() FAKE_IPSEC_CONNECTION = {"vpnservice_id": FAKE_VPNSERVICE_ID, "id": _uuid()} FAKE_EDGE_ID = _uuid() FAKE_IPSEC_VPN_SITE = {"peerIp": "192.168.1.1"} FAKE_VCNSAPIEXC = {"status": "fail", "head": "fake_head", "response": "error"} FAKE_NEW_CONNECTION = {"peer_cidrs": "192.168.1.0/24"} class TestVpnaasDriver(test_plugin.NsxVPluginV2TestCase): def setUp(self): super(TestVpnaasDriver, self).setUp() self.context = context.get_admin_context() self.service_plugin = mock.Mock() self.validator = mock.Mock() self.driver = ipsec_driver.NSXvIPsecVpnDriver(self.service_plugin) self.plugin = directory.get_plugin() self.l3plugin = self.plugin @contextlib.contextmanager def router(self, name='vpn-test-router', tenant_id=_uuid(), admin_state_up=True, **kwargs): request = {'router': {'tenant_id': tenant_id, 'name': name, 'admin_state_up': admin_state_up}} for arg in kwargs: request['router'][arg] = kwargs[arg] router = self.l3plugin.create_router(self.context, request) yield router @mock.patch('%s._convert_ipsec_conn' % DRIVER_PATH) @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._generate_new_sites' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) 
@mock.patch('%s._update_status' % DRIVER_PATH) @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) def test_create_ipsec_site_connection(self, mock_update_fw, mock_update_status, mock_update_ipsec, mock_gen_new, mock_get_id, mock_conv_ipsec): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_conv_ipsec.return_value = FAKE_IPSEC_VPN_SITE mock_gen_new.return_value = FAKE_IPSEC_VPN_SITE self.driver.create_ipsec_site_connection(self.context, FAKE_IPSEC_CONNECTION) mock_conv_ipsec.assert_called_with(self.context, FAKE_IPSEC_CONNECTION) mock_get_id.assert_called_with(self.context, FAKE_VPNSERVICE_ID) mock_gen_new.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE, enabled=True) mock_update_fw.assert_called_with(self.context, FAKE_VPNSERVICE_ID) mock_update_status.assert_called_with( self.context, FAKE_IPSEC_CONNECTION["vpnservice_id"], FAKE_IPSEC_CONNECTION["id"], "ACTIVE") @mock.patch('%s._convert_ipsec_conn' % DRIVER_PATH) @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._generate_new_sites' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) @mock.patch('%s._update_status' % DRIVER_PATH) def test_create_ipsec_site_connection_fail(self, mock_update_status, mock_update_ipsec, mock_gen_new, mock_get_id, mock_conv_ipsec): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_conv_ipsec.return_value = FAKE_IPSEC_VPN_SITE mock_gen_new.return_value = FAKE_IPSEC_VPN_SITE mock_update_ipsec.side_effect = ( vcns_exc.VcnsApiException(**FAKE_VCNSAPIEXC)) self.assertRaises(nsxv_exc.NsxPluginException, self.driver.create_ipsec_site_connection, self.context, FAKE_IPSEC_CONNECTION) mock_conv_ipsec.assert_called_with(self.context, FAKE_IPSEC_CONNECTION) mock_get_id.assert_called_with(self.context, FAKE_VPNSERVICE_ID) mock_gen_new.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE, 
enabled=True) mock_update_status.assert_called_with( self.context, FAKE_IPSEC_CONNECTION["vpnservice_id"], FAKE_IPSEC_CONNECTION["id"], "ERROR") @mock.patch('%s._convert_ipsec_conn' % DRIVER_PATH) @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._generate_new_sites' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) @mock.patch('%s._update_status' % DRIVER_PATH) @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) def test_update_fw_fail(self, mock_update_fw, mock_update_status, mock_update_ipsec, mock_gen_new, mock_get_id, mock_conv_ipsec): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_conv_ipsec.return_value = FAKE_IPSEC_VPN_SITE mock_gen_new.return_value = FAKE_IPSEC_VPN_SITE mock_update_fw.side_effect = ( vcns_exc.VcnsApiException(**FAKE_VCNSAPIEXC)) self.assertRaises(nsxv_exc.NsxPluginException, self.driver.create_ipsec_site_connection, self.context, FAKE_IPSEC_CONNECTION) mock_conv_ipsec.assert_called_with(self.context, FAKE_IPSEC_CONNECTION) mock_get_id.assert_called_with(self.context, FAKE_VPNSERVICE_ID) mock_gen_new.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE, enabled=True) mock_update_fw.assert_called_with(self.context, FAKE_VPNSERVICE_ID) mock_update_status.assert_called_with( self.context, FAKE_IPSEC_CONNECTION["vpnservice_id"], FAKE_IPSEC_CONNECTION["id"], "ERROR") @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._update_site_dict' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) def test_update_ipsec(self, mock_update_fw, mock_update_ipsec, mock_update_sites, mock_get_id): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_update_sites.return_value = FAKE_IPSEC_VPN_SITE self.driver.update_ipsec_site_connection(self.context, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) 
mock_update_sites.assert_called_with(self.context, FAKE_EDGE_ID, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) mock_update_fw.assert_called_with(self.context, FAKE_VPNSERVICE_ID) @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._update_site_dict' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) def test_update_ipsec_fail_with_notfound(self, mock_update_fw, mock_update_ipsec, mock_update_sites, mock_get_id): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_update_sites.return_value = {} self.assertRaises(nsxv_exc.NsxIPsecVpnMappingNotFound, self.driver.update_ipsec_site_connection, self.context, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) mock_update_sites.assert_called_with(self.context, FAKE_EDGE_ID, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._update_site_dict' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) def test_update_ipsec_fail_with_fw_fail(self, mock_update_fw, mock_update_ipsec, mock_update_sites, mock_get_id): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_update_fw.side_effect = ( vcns_exc.VcnsApiException(**FAKE_VCNSAPIEXC)) self.assertRaises(nsxv_exc.NsxPluginException, self.driver.update_ipsec_site_connection, self.context, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) mock_update_sites.assert_called_with(self.context, FAKE_EDGE_ID, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) mock_update_fw.assert_called_with(self.context, FAKE_VPNSERVICE_ID) @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._update_site_dict' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) @mock.patch('%s._update_status' % DRIVER_PATH) def test_update_ipsec_fail_with_site_fail(self, mock_update_status, 
mock_update_ipsec, mock_update_sites, mock_get_id): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_update_sites.return_value = FAKE_IPSEC_VPN_SITE mock_update_ipsec.side_effect = ( vcns_exc.VcnsApiException(**FAKE_VCNSAPIEXC)) self.assertRaises(nsxv_exc.NsxPluginException, self.driver.update_ipsec_site_connection, self.context, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) mock_update_sites.assert_called_with(self.context, FAKE_EDGE_ID, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) mock_update_status.assert_called_with( self.context, FAKE_IPSEC_CONNECTION["vpnservice_id"], FAKE_IPSEC_CONNECTION["id"], "ERROR") def test_create_vpn_service_legal(self): """Create a legal vpn service""" # create an external network with a subnet, and an exclusive router providernet_args = {extnet_apidef.EXTERNAL: True} with self.network(name='ext-net', providernet_args=providernet_args, arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ self.subnet(ext_net),\ self.router(router_type='exclusive', external_gateway_info={'network_id': ext_net['network']['id']}) as router,\ self.subnet() as sub: # add an interface to the router self.l3plugin.add_router_interface( self.context, router['id'], {'subnet_id': sub['subnet']['id']}) # create the service vpnservice = {'router_id': router['id'], 'id': _uuid(), 'subnet_id': sub['subnet']['id']} with mock.patch.object(self.driver, '_get_gateway_ips', return_value=(None, None)): self.driver.create_vpnservice(self.context, vpnservice) def test_create_vpn_service_on_shared_router(self): """Creating a service with shared router is not allowed""" # create an external network with a subnet, and a shared router providernet_args = {extnet_apidef.EXTERNAL: True} with self.network(name='ext-net', providernet_args=providernet_args, arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ self.subnet(ext_net),\ self.router(router_type='shared', external_gateway_info={'network_id': 
ext_net['network']['id']}) as router,\ self.subnet() as sub: # add an interface to the router self.l3plugin.add_router_interface( self.context, router['id'], {'subnet_id': sub['subnet']['id']}) # create the service vpnservice = {'router_id': router['id'], 'id': _uuid(), 'subnet_id': sub['subnet']['id']} self.assertRaises(nsxv_exc.NsxPluginException, self.driver.create_vpnservice, self.context, vpnservice) def test_create_vpn_service_on_router_without_if(self): """Creating a service with unattached subnet is not allowed""" # create an external network with a subnet, and an exclusive router providernet_args = {extnet_apidef.EXTERNAL: True} with self.network(name='ext-net', providernet_args=providernet_args, arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ self.subnet(ext_net),\ self.router(router_type='exclusive', external_gateway_info={'network_id': ext_net['network']['id']}) as router,\ self.subnet() as sub: # create the service vpnservice = {'router_id': router['id'], 'id': _uuid(), 'subnet_id': sub['subnet']['id']} self.assertRaises(vpnaas.SubnetIsNotConnectedToRouter, self.driver.create_vpnservice, self.context, vpnservice) def test_create_vpn_service_without_subnet(self): """Creating a service without a subnet is not allowed""" # create an external network with a subnet, and an exclusive router providernet_args = {extnet_apidef.EXTERNAL: True} with self.network(name='ext-net', providernet_args=providernet_args, arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ self.subnet(ext_net),\ self.router(router_type='exclusive', external_gateway_info={'network_id': ext_net['network']['id']}) as router,\ self.subnet() as sub: # add an interface to the router self.l3plugin.add_router_interface( self.context, router['id'], {'subnet_id': sub['subnet']['id']}) # create the service without the subnet vpnservice = {'router_id': router['id'], 'id': _uuid(), 'subnet_id': None} self.assertRaises(nsxv_exc.NsxPluginException, self.driver.create_vpnservice, self.context, vpnservice) 
vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/trunk/0000775000175100017510000000000013244524600023226 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/trunk/test_nsxv3_driver.py0000666000175100017510000001630413244523345027306 0ustar zuulzuul00000000000000# Copyright (c) 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron.tests import base from neutron_lib import context from oslo_config import cfg from oslo_utils import importutils from vmware_nsx.common import nsx_constants from vmware_nsx.services.trunk.nsx_v3 import driver as trunk_driver from vmware_nsx.tests.unit.nsx_v3 import test_constants as test_consts from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsx_v3_plugin class TestNsxV3TrunkHandler(test_nsx_v3_plugin.NsxV3PluginTestCaseMixin, base.BaseTestCase): def setUp(self): super(TestNsxV3TrunkHandler, self).setUp() self.context = context.get_admin_context() self.core_plugin = importutils.import_object(test_consts.PLUGIN_NAME) self.handler = trunk_driver.NsxV3TrunkHandler(self.core_plugin) self.handler._update_port_at_backend = mock.Mock() self.trunk_1 = mock.Mock() self.trunk_1.port_id = "parent_port_1" self.trunk_2 = mock.Mock() self.trunk_2.port_id = "parent_port_2" self.sub_port_1 = mock.Mock() self.sub_port_1.segmentation_id = 40 self.sub_port_1.trunk_id = "trunk-1" self.sub_port_1.port_id = "sub_port_1" self.sub_port_2 = mock.Mock() self.sub_port_2.segmentation_id = 41 
self.sub_port_2.trunk_id = "trunk-2" self.sub_port_2.port_id = "sub_port_2" self.sub_port_3 = mock.Mock() self.sub_port_3.segmentation_id = 43 self.sub_port_3.trunk_id = "trunk-2" self.sub_port_3.port_id = "sub_port_3" def test_trunk_created(self): # Create trunk with no subport self.trunk_1.sub_ports = [] self.handler.trunk_created(self.context, self.trunk_1) self.handler._update_port_at_backend.assert_not_called() # Create trunk with 1 subport self.trunk_1.sub_ports = [self.sub_port_1] self.handler.trunk_created(self.context, self.trunk_1) self.handler._update_port_at_backend.assert_called_with( self.context, self.trunk_1.port_id, self.sub_port_1) # Create trunk with multiple subports self.trunk_2.sub_ports = [self.sub_port_2, self.sub_port_3] self.handler.trunk_created(self.context, self.trunk_2) calls = [mock.call._update_port_at_backend( self.context, self.trunk_2.port_id, self.sub_port_2), mock.call._update_port_at_backend( self.context, self.trunk_2.port_id, self.sub_port_3)] self.handler._update_port_at_backend.assert_has_calls( calls, any_order=True) def test_trunk_deleted(self): # Delete trunk with no subport self.trunk_1.sub_ports = [] self.handler.trunk_deleted(self.context, self.trunk_1) self.handler._update_port_at_backend.assert_not_called() # Delete trunk with 1 subport self.trunk_1.sub_ports = [self.sub_port_1] self.handler.trunk_deleted(self.context, self.trunk_1) self.handler._update_port_at_backend.assert_called_with( context=self.context, parent_port_id=None, subport=self.sub_port_1) # Delete trunk with multiple subports self.trunk_2.sub_ports = [self.sub_port_2, self.sub_port_3] self.handler.trunk_deleted(self.context, self.trunk_2) calls = [mock.call._update_port_at_backend( context=self.context, parent_port_id=None, subport=self.sub_port_2), mock.call._update_port_at_backend( context=self.context, parent_port_id=None, subport=self.sub_port_3)] self.handler._update_port_at_backend.assert_has_calls( calls, any_order=True) def 
test_subports_added(self): # Update trunk with no subport sub_ports = [] self.handler.subports_added(self.context, self.trunk_1, sub_ports) self.handler._update_port_at_backend.assert_not_called() # Update trunk with 1 subport sub_ports = [self.sub_port_1] self.handler.subports_added(self.context, self.trunk_1, sub_ports) self.handler._update_port_at_backend.assert_called_with( self.context, self.trunk_1.port_id, self.sub_port_1) # Update trunk with multiple subports sub_ports = [self.sub_port_2, self.sub_port_3] self.handler.subports_added(self.context, self.trunk_2, sub_ports) calls = [mock.call._update_port_at_backend( self.context, self.trunk_2.port_id, self.sub_port_2), mock.call._update_port_at_backend( self.context, self.trunk_2.port_id, self.sub_port_3)] self.handler._update_port_at_backend.assert_has_calls( calls, any_order=True) def test_subports_deleted(self): # Update trunk to remove no subport sub_ports = [] self.handler.subports_deleted(self.context, self.trunk_1, sub_ports) self.handler._update_port_at_backend.assert_not_called() # Update trunk to remove 1 subport sub_ports = [self.sub_port_1] self.handler.subports_deleted(self.context, self.trunk_1, sub_ports) self.handler._update_port_at_backend.assert_called_with( context=self.context, parent_port_id=None, subport=self.sub_port_1) # Update trunk to remove multiple subports sub_ports = [self.sub_port_2, self.sub_port_3] self.handler.subports_deleted(self.context, self.trunk_2, sub_ports) calls = [mock.call._update_port_at_backend( context=self.context, parent_port_id=None, subport=self.sub_port_2), mock.call._update_port_at_backend( context=self.context, parent_port_id=None, subport=self.sub_port_3)] self.handler._update_port_at_backend.assert_has_calls( calls, any_order=True) class TestNsxV3TrunkDriver(base.BaseTestCase): def setUp(self): super(TestNsxV3TrunkDriver, self).setUp() def test_is_loaded(self): driver = trunk_driver.NsxV3TrunkDriver.create(mock.Mock()) 
cfg.CONF.set_override('core_plugin', nsx_constants.VMWARE_NSX_V3_PLUGIN_NAME) self.assertTrue(driver.is_loaded) cfg.CONF.set_override('core_plugin', 'not_vmware_nsx_plugin') self.assertFalse(driver.is_loaded) vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/trunk/__init__.py0000666000175100017510000000000013244523345025334 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/lbaas/0000775000175100017510000000000013244524600023145 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/lbaas/test_nsxv3_driver.py0000666000175100017510000012310613244523345027224 0ustar zuulzuul00000000000000# Copyright (c) 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from neutron.tests import base from neutron_lbaas.services.loadbalancer import data_models as lb_models from neutron_lib import context from neutron_lib import exceptions as n_exc from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas.nsx_v3 import lb_driver_v2 from vmware_nsx.services.lbaas.nsx_v3 import lb_utils LB_VIP = '10.0.0.10' LB_ROUTER_ID = 'router-x' LB_ID = 'xxx-xxx' LB_TENANT_ID = 'yyy-yyy' LB_SERVICE_ID = 'service-1' LB_BINDING = {'loadbalancer_id': LB_ID, 'lb_service_id': LB_SERVICE_ID, 'lb_router_id': LB_ROUTER_ID, 'vip_address': LB_VIP} LB_NETWORK = {'router:external': False, 'id': 'xxxxx', 'name': 'network-1'} LISTENER_ID = 'listener-x' APP_PROFILE_ID = 'appp-x' LB_VS_ID = 'vs-x' LB_APP_PROFILE = { "resource_type": "LbHttpProfile", "description": "my http profile", "id": APP_PROFILE_ID, "display_name": "httpprofile1", "ntlm": False, "request_header_size": 1024, "http_redirect_to_https": False, "idle_timeout": 1800, "x_forwarded_for": "INSERT", } LISTENER_BINDING = {'loadbalancer_id': LB_ID, 'listener_id': LISTENER_ID, 'app_profile_id': APP_PROFILE_ID, 'lb_vs_id': LB_VS_ID} POOL_ID = 'ppp-qqq' LB_POOL_ID = 'pool-xx' LB_POOL = { "display_name": "httppool1", "description": "my http pool", "id": LB_POOL_ID, "algorithm": "ROUND_ROBIN", } POOL_BINDING = {'loadbalancer_id': LB_ID, 'pool_id': POOL_ID, 'lb_pool_id': LB_POOL_ID, 'lb_vs_id': LB_VS_ID} MEMBER_ID = 'mmm-mmm' MEMBER_ADDRESS = '10.0.0.200' LB_MEMBER = {'display_name': 'member1_' + MEMBER_ID, 'weight': 1, 'ip_address': MEMBER_ADDRESS, 'port': 80} LB_POOL_WITH_MEMBER = { "display_name": "httppool1", "description": "my http pool", "id": LB_POOL_ID, "algorithm": "ROUND_ROBIN", "members": [ { "display_name": "http-member1", "ip_address": MEMBER_ADDRESS, "port": "80", "weight": "1", "admin_state": "ENABLED" } ] } HM_ID = 'hhh-mmm' LB_MONITOR_ID = 'mmm-ddd' HM_BINDING = {'loadbalancer_id': LB_ID, 'pool_id': POOL_ID, 'hm_id': 
HM_ID, 'lb_monitor_id': LB_MONITOR_ID, 'lb_pool_id': LB_POOL_ID} L7POLICY_ID = 'l7policy-xxx' LB_RULE_ID = 'lb-rule-xx' L7RULE_ID = 'l7rule-111' L7POLICY_BINDING = {'l7policy_id': L7POLICY_ID, 'lb_vs_id': LB_VS_ID, 'lb_rule_id': LB_RULE_ID} FAKE_CERT = {'id': 'cert-xyz'} class BaseTestEdgeLbaasV2(base.BaseTestCase): def _tested_entity(self): return None def setUp(self): super(BaseTestEdgeLbaasV2, self).setUp() self.context = context.get_admin_context() self.edge_driver = lb_driver_v2.EdgeLoadbalancerDriverV2() self.lbv2_driver = mock.Mock() self.core_plugin = mock.Mock() base_mgr.LoadbalancerBaseManager._lbv2_driver = self.lbv2_driver base_mgr.LoadbalancerBaseManager._core_plugin = self.core_plugin self._patch_lb_plugin(self.lbv2_driver, self._tested_entity) self._patch_nsxlib_lb_clients(self.core_plugin) self.lb = lb_models.LoadBalancer(LB_ID, LB_TENANT_ID, 'lb1', '', 'some-subnet', 'port-id', LB_VIP) self.listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID, 'listener1', '', None, LB_ID, 'HTTP', protocol_port=80, loadbalancer=self.lb) self.https_listener = lb_models.Listener( LISTENER_ID, LB_TENANT_ID, 'listener1', '', None, LB_ID, 'HTTPS', protocol_port=443, loadbalancer=self.lb) self.terminated_https_listener = lb_models.Listener( LISTENER_ID, LB_TENANT_ID, 'listener1', '', None, LB_ID, 'TERMINATED_HTTPS', protocol_port=443, loadbalancer=self.lb) self.pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool1', '', None, 'HTTP', 'ROUND_ROBIN', loadbalancer_id=LB_ID, listener=self.listener, listeners=[self.listener], loadbalancer=self.lb) self.member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID, MEMBER_ADDRESS, 80, 1, pool=self.pool, name='member1') self.hm = lb_models.HealthMonitor(HM_ID, LB_TENANT_ID, 'PING', 3, 3, 1, pool=self.pool, name='hm1') self.l7policy = lb_models.L7Policy(L7POLICY_ID, LB_TENANT_ID, name='policy-test', description='policy-desc', listener_id=LISTENER_ID, action='REDIRECT_TO_POOL', redirect_pool_id=POOL_ID, listener=self.listener, 
position=1) self.l7rule = lb_models.L7Rule(L7RULE_ID, LB_TENANT_ID, l7policy_id=L7POLICY_ID, compare_type='EQUAL_TO', invert=False, type='HEADER', key='key1', value='val1', policy=self.l7policy) def tearDown(self): self._unpatch_lb_plugin(self.lbv2_driver, self._tested_entity) super(BaseTestEdgeLbaasV2, self).tearDown() def _patch_lb_plugin(self, lb_plugin, manager): self.real_manager = getattr(lb_plugin, manager) lb_manager = mock.patch.object(lb_plugin, manager).start() mock.patch.object(lb_manager, 'create').start() mock.patch.object(lb_manager, 'update').start() mock.patch.object(lb_manager, 'delete').start() mock.patch.object(lb_manager, 'successful_completion').start() def _patch_nsxlib_lb_clients(self, core_plugin): nsxlib = mock.patch.object(core_plugin, 'nsxlib').start() load_balancer = mock.patch.object(nsxlib, 'load_balancer').start() self.service_client = mock.patch.object(load_balancer, 'service').start() self.app_client = mock.patch.object(load_balancer, 'application_profile').start() self.vs_client = mock.patch.object(load_balancer, 'virtual_server').start() self.pool_client = mock.patch.object(load_balancer, 'pool').start() self.monitor_client = mock.patch.object(load_balancer, 'monitor').start() self.rule_client = mock.patch.object(load_balancer, 'rule').start() self.tm_client = mock.patch.object(nsxlib, 'trust_management').start() def _unpatch_lb_plugin(self, lb_plugin, manager): setattr(lb_plugin, manager, self.real_manager) class TestEdgeLbaasV2Loadbalancer(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2Loadbalancer, self).setUp() @property def _tested_entity(self): return 'load_balancer' def test_create(self): with mock.patch.object(lb_utils, 'validate_lb_subnet' ) as mock_validate_lb_subnet: mock_validate_lb_subnet.return_value = True self.edge_driver.loadbalancer.create(self.context, self.lb) mock_successful_completion = ( self.lbv2_driver.load_balancer.successful_completion) 
mock_successful_completion.assert_called_with(self.context, self.lb) def test_update(self): new_lb = lb_models.LoadBalancer(LB_ID, 'yyy-yyy', 'lb1-new', 'new-description', 'some-subnet', 'port-id', LB_VIP) self.edge_driver.loadbalancer.update(self.context, self.lb, new_lb) mock_successful_completion = ( self.lbv2_driver.load_balancer.successful_completion) mock_successful_completion.assert_called_with(self.context, new_lb) def test_delete(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(self.service_client, 'get' ) as mock_get_lb_service, \ mock.patch.object(self.service_client, 'delete' ) as mock_delete_lb_service, \ mock.patch.object(nsx_db, 'delete_nsx_lbaas_loadbalancer_binding' ) as mock_delete_lb_binding: mock_get_lb_binding.return_value = LB_BINDING mock_get_lb_service.return_value = {'id': LB_SERVICE_ID} self.edge_driver.loadbalancer.delete(self.context, self.lb) mock_delete_lb_service.assert_called_with(LB_SERVICE_ID) mock_delete_lb_binding.assert_called_with( self.context.session, LB_ID) mock_successful_completion = ( self.lbv2_driver.load_balancer.successful_completion) mock_successful_completion.assert_called_with(self.context, self.lb, delete=True) def test_stats(self): pass def test_refresh(self): pass class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2Listener, self).setUp() @property def _tested_entity(self): return 'listener' def _create_listener(self, protocol='HTTP'): with mock.patch.object(self.core_plugin, 'get_floatingips' ) as mock_get_floatingips, \ mock.patch.object(self.app_client, 'create' ) as mock_create_app_profile, \ mock.patch.object(self.vs_client, 'create' ) as mock_create_virtual_server, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(self.service_client, 'add_virtual_server' ) as mock_add_virtual_server, \ mock.patch.object(nsx_db, 
'add_nsx_lbaas_listener_binding' ) as mock_add_listener_binding: mock_get_floatingips.return_value = [] mock_create_app_profile.return_value = {'id': APP_PROFILE_ID} mock_create_virtual_server.return_value = {'id': LB_VS_ID} mock_get_lb_binding.return_value = LB_BINDING listener = self.listener if protocol == 'HTTPS': listener = self.https_listener self.edge_driver.listener.create(self.context, listener) mock_add_virtual_server.assert_called_with(LB_SERVICE_ID, LB_VS_ID) mock_add_listener_binding.assert_called_with( self.context.session, LB_ID, LISTENER_ID, APP_PROFILE_ID, LB_VS_ID) mock_successful_completion = ( self.lbv2_driver.listener.successful_completion) mock_successful_completion.assert_called_with(self.context, listener) def test_create_http_listener(self): self._create_listener() def test_create_https_listener(self): self._create_listener(protocol='HTTPS') def test_create_terminated_https(self): with mock.patch.object(self.core_plugin, 'get_floatingips' ) as mock_get_floatingips, \ mock.patch.object(self.tm_client, 'create_cert' ) as mock_create_cert, \ mock.patch.object(self.app_client, 'create' ) as mock_create_app_profile, \ mock.patch.object(self.vs_client, 'create' ) as mock_create_virtual_server, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(self.service_client, 'add_virtual_server' ) as mock_add_virtual_server, \ mock.patch.object(nsx_db, 'add_nsx_lbaas_listener_binding' ) as mock_add_listener_binding: mock_get_floatingips.return_value = [] mock_create_cert.return_value = FAKE_CERT['id'] mock_create_app_profile.return_value = {'id': APP_PROFILE_ID} mock_create_virtual_server.return_value = {'id': LB_VS_ID} mock_get_lb_binding.return_value = LB_BINDING self.edge_driver.listener.create(self.context, self.terminated_https_listener) mock_add_virtual_server.assert_called_with(LB_SERVICE_ID, LB_VS_ID) mock_add_listener_binding.assert_called_with( self.context.session, LB_ID, LISTENER_ID, 
APP_PROFILE_ID, LB_VS_ID) mock_successful_completion = ( self.lbv2_driver.listener.successful_completion) mock_successful_completion.assert_called_with( self.context, self.terminated_https_listener) def test_update(self): new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID, 'listener1-new', 'new-description', None, LB_ID, protocol_port=80, loadbalancer=self.lb) with mock.patch.object(self.core_plugin, 'get_floatingips' ) as mock_get_floatingips, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding' ) as mock_get_listener_binding: mock_get_floatingips.return_value = [] mock_get_listener_binding.return_value = LISTENER_BINDING self.edge_driver.listener.update(self.context, self.listener, new_listener) mock_successful_completion = ( self.lbv2_driver.listener.successful_completion) mock_successful_completion.assert_called_with(self.context, new_listener) def test_delete(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(self.service_client, 'get' ) as mock_get_lb_service, \ mock.patch.object(self.service_client, 'remove_virtual_server' ) as mock_remove_virtual_server, \ mock.patch.object(self.app_client, 'delete' ) as mock_delete_app_profile, \ mock.patch.object(self.vs_client, 'delete' ) as mock_delete_virtual_server, \ mock.patch.object(nsx_db, 'delete_nsx_lbaas_listener_binding', ) as mock_delete_listener_binding: mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_lb_binding.return_value = LB_BINDING mock_get_lb_service.return_value = { 'id': LB_SERVICE_ID, 'virtual_server_ids': [LB_VS_ID]} self.edge_driver.listener.delete(self.context, self.listener) mock_remove_virtual_server.assert_called_with(LB_SERVICE_ID, LB_VS_ID) mock_delete_virtual_server.assert_called_with(LB_VS_ID) mock_delete_app_profile.assert_called_with(APP_PROFILE_ID) 
mock_delete_listener_binding.assert_called_with( self.context.session, LB_ID, LISTENER_ID) mock_successful_completion = ( self.lbv2_driver.listener.successful_completion) mock_successful_completion.assert_called_with(self.context, self.listener, delete=True) class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2Pool, self).setUp() @property def _tested_entity(self): return 'pool' def test_create(self): with mock.patch.object(self.pool_client, 'create' ) as mock_create_pool, \ mock.patch.object(nsx_db, 'add_nsx_lbaas_pool_binding' ) as mock_add_pool_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(self.vs_client, 'update', return_value=None), \ mock.patch.object(nsx_db, 'update_nsx_lbaas_pool_binding' ) as mock_update_pool_binding: mock_create_pool.return_value = {'id': LB_POOL_ID} mock_get_listener_binding.return_value = LISTENER_BINDING self.edge_driver.pool.create(self.context, self.pool) mock_add_pool_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, LB_POOL_ID) mock_update_pool_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, LB_VS_ID) mock_successful_completion = ( self.lbv2_driver.pool.successful_completion) mock_successful_completion.assert_called_with(self.context, self.pool) def test_update(self): new_pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool-name', '', None, 'HTTP', 'LEAST_CONNECTIONS', listener=self.listener) with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding: mock_get_pool_binding.return_value = POOL_BINDING self.edge_driver.pool.update(self.context, self.pool, new_pool) mock_successful_completion = ( self.lbv2_driver.pool.successful_completion) mock_successful_completion.assert_called_with(self.context, new_pool) def test_delete(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.vs_client, 'update', 
return_value=None ) as mock_update_virtual_server, \ mock.patch.object(self.pool_client, 'delete' ) as mock_delete_pool, \ mock.patch.object(nsx_db, 'delete_nsx_lbaas_pool_binding' ) as mock_delete_pool_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding' ) as mock_get_lb_binding: mock_get_pool_binding.return_value = POOL_BINDING mock_get_lb_binding.return_value = None self.edge_driver.pool.delete(self.context, self.pool) mock_update_virtual_server.assert_called_with(LB_VS_ID, pool_id='') mock_delete_pool.assert_called_with(LB_POOL_ID) mock_delete_pool_binding.assert_called_with( self.context.session, LB_ID, POOL_ID) mock_successful_completion = ( self.lbv2_driver.pool.successful_completion) mock_successful_completion.assert_called_with(self.context, self.pool, delete=True) class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2Member, self).setUp() @property def _tested_entity(self): return 'member' def test_create(self): with mock.patch.object(lb_utils, 'validate_lb_subnet' ) as mock_validate_lb_subnet, \ mock.patch.object(self.lbv2_driver.plugin, 'get_pool_members' ) as mock_get_pool_members, \ mock.patch.object(lb_utils, 'get_network_from_subnet' ) as mock_get_network, \ mock.patch.object(lb_utils, 'get_router_from_network' ) as mock_get_router, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsx_db, 'get_nsx_router_id' ) as mock_get_nsx_router_id, \ mock.patch.object(self.service_client, 'get_router_lb_service' ) as mock_get_lb_service, \ mock.patch.object(nsx_db, 'add_nsx_lbaas_loadbalancer_binding' ) as mock_add_loadbalancer_bidning, \ mock.patch.object(self.service_client, 'add_virtual_server' ) as mock_add_vs_to_service, \ mock.patch.object(self.pool_client, 'get' ) as mock_get_pool, \ mock.patch.object(self.pool_client, 'update_pool_with_members' ) as 
mock_update_pool_with_members: mock_validate_lb_subnet.return_value = True mock_get_pool_members.return_value = [self.member] mock_get_network.return_value = LB_NETWORK mock_get_router.return_value = LB_ROUTER_ID mock_get_pool_binding.return_value = POOL_BINDING mock_get_lb_binding.return_value = None mock_get_nsx_router_id.return_value = LB_ROUTER_ID mock_get_lb_service.return_value = {'id': LB_SERVICE_ID} mock_get_pool.return_value = LB_POOL self.edge_driver.member.create(self.context, self.member) mock_add_loadbalancer_bidning.assert_called_with( self.context.session, LB_ID, LB_SERVICE_ID, LB_ROUTER_ID, LB_VIP) mock_add_vs_to_service.assert_called_with(LB_SERVICE_ID, LB_VS_ID) mock_update_pool_with_members.assert_called_with(LB_POOL_ID, [LB_MEMBER]) mock_successful_completion = ( self.lbv2_driver.member.successful_completion) mock_successful_completion.assert_called_with(self.context, self.member) def test_create_lbs_no_router_gateway(self): with mock.patch.object(lb_utils, 'validate_lb_subnet' ) as mock_validate_lb_subnet, \ mock.patch.object(self.lbv2_driver.plugin, 'get_pool_members' ) as mock_get_pool_members, \ mock.patch.object(lb_utils, 'get_network_from_subnet' ) as mock_get_network, \ mock.patch.object(lb_utils, 'get_router_from_network' ) as mock_get_router_from_network, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsx_db, 'get_nsx_router_id' ) as mock_get_nsx_router_id, \ mock.patch.object(self.service_client, 'get_router_lb_service' ) as mock_get_lb_service, \ mock.patch.object(self.core_plugin, 'get_router' ) as mock_get_router: mock_validate_lb_subnet.return_value = True mock_get_pool_members.return_value = [self.member] mock_get_network.return_value = LB_NETWORK mock_get_router_from_network.return_value = LB_ROUTER_ID mock_get_pool_binding.return_value = POOL_BINDING mock_get_lb_binding.return_value 
= None mock_get_nsx_router_id.return_value = LB_ROUTER_ID mock_get_lb_service.return_value = None mock_get_router.return_value = {'id': 'router1-xxx'} self.assertRaises(n_exc.BadRequest, self.edge_driver.member.create, self.context, self.member) def test_update(self): new_member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID, MEMBER_ADDRESS, 80, 2, pool=self.pool, name='member-nnn-nnn') with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.pool_client, 'get' ) as mock_get_pool, \ mock.patch.object(lb_utils, 'get_network_from_subnet' ) as mock_get_network_from_subnet: mock_get_pool_binding.return_value = POOL_BINDING mock_get_pool.return_value = LB_POOL_WITH_MEMBER mock_get_network_from_subnet.return_value = LB_NETWORK self.edge_driver.member.update(self.context, self.member, new_member) mock_successful_completion = ( self.lbv2_driver.member.successful_completion) mock_successful_completion.assert_called_with(self.context, new_member) def test_delete(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.pool_client, 'get' ) as mock_get_pool, \ mock.patch.object(lb_utils, 'get_network_from_subnet' ) as mock_get_network_from_subnet, \ mock.patch.object(self.pool_client, 'update_pool_with_members' ) as mock_update_pool_with_members: mock_get_pool_binding.return_value = POOL_BINDING mock_get_pool.return_value = LB_POOL_WITH_MEMBER mock_get_network_from_subnet.return_value = LB_NETWORK self.edge_driver.member.delete(self.context, self.member) mock_update_pool_with_members.assert_called_with(LB_POOL_ID, []) mock_successful_completion = ( self.lbv2_driver.member.successful_completion) mock_successful_completion.assert_called_with(self.context, self.member, delete=True) class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2HealthMonitor, self).setUp() @property def _tested_entity(self): return 
'health_monitor' def test_create(self): with mock.patch.object(self.monitor_client, 'create' ) as mock_create_monitor, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.pool_client, 'add_monitor_to_pool' ) as mock_add_monitor_to_pool, \ mock.patch.object(nsx_db, 'add_nsx_lbaas_monitor_binding' ) as mock_add_monitor_binding: mock_create_monitor.return_value = {'id': LB_MONITOR_ID} mock_get_pool_binding.return_value = POOL_BINDING self.edge_driver.healthmonitor.create(self.context, self.hm) mock_add_monitor_to_pool.assert_called_with(LB_POOL_ID, LB_MONITOR_ID) mock_add_monitor_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, HM_ID, LB_MONITOR_ID, LB_POOL_ID) mock_successful_completion = ( self.lbv2_driver.health_monitor.successful_completion) mock_successful_completion.assert_called_with(self.context, self.hm) def test_update(self): new_hm = lb_models.HealthMonitor(HM_ID, LB_TENANT_ID, 'PING', 3, 3, 3, pool=self.pool) self.edge_driver.healthmonitor.update(self.context, self.hm, new_hm) mock_successful_completion = ( self.lbv2_driver.health_monitor.successful_completion) mock_successful_completion.assert_called_with(self.context, new_hm) def test_delete(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_monitor_binding' ) as mock_get_monitor_binding, \ mock.patch.object(self.pool_client, 'remove_monitor_from_pool' ) as mock_remove_monitor_from_pool, \ mock.patch.object(self.monitor_client, 'delete' ) as mock_delete_monitor, \ mock.patch.object(nsx_db, 'delete_nsx_lbaas_monitor_binding' ) as mock_delete_monitor_binding: mock_get_monitor_binding.return_value = HM_BINDING self.edge_driver.healthmonitor.delete(self.context, self.hm) mock_remove_monitor_from_pool.assert_called_with(LB_POOL_ID, LB_MONITOR_ID) mock_delete_monitor.assert_called_with(LB_MONITOR_ID) mock_delete_monitor_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, HM_ID) mock_successful_completion = ( 
self.lbv2_driver.health_monitor.successful_completion) mock_successful_completion.assert_called_with(self.context, self.hm, delete=True) class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2L7Policy, self).setUp() @property def _tested_entity(self): return 'l7policy' def test_create(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.rule_client, 'create' ) as mock_create_rule, \ mock.patch.object(self.vs_client, 'get' ) as mock_get_virtual_server, \ mock.patch.object(self.vs_client, 'update' ) as mock_update_virtual_server, \ mock.patch.object(nsx_db, 'add_nsx_lbaas_l7policy_binding' ) as mock_add_l7policy_binding: mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_create_rule.return_value = {'id': LB_RULE_ID} mock_get_virtual_server.return_value = {'id': LB_VS_ID} self.edge_driver.l7policy.create(self.context, self.l7policy) mock_update_virtual_server.assert_called_with( LB_VS_ID, rule_ids=[LB_RULE_ID]) mock_add_l7policy_binding.assert_called_with( self.context.session, L7POLICY_ID, LB_RULE_ID, LB_VS_ID) mock_successful_completion = ( self.lbv2_driver.l7policy.successful_completion) mock_successful_completion.assert_called_with(self.context, self.l7policy) def test_update(self): new_l7policy = lb_models.L7Policy(L7POLICY_ID, LB_TENANT_ID, name='new-policy', listener_id=LISTENER_ID, action='REJECT', listener=self.listener, position=2) vs_with_rules = { 'id': LB_VS_ID, 'rule_ids': [LB_RULE_ID, 'abc', 'xyz'] } rule_body = { 'match_conditions': [], 'actions': [{ 'type': 'LbHttpRejectAction', 'reply_status': '403'}], 'phase': 'HTTP_FORWARDING', 'match_strategy': 'ALL' } with mock.patch.object(nsx_db, 'get_nsx_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsx_db, 
'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.rule_client, 'update' ) as mock_update_rule, \ mock.patch.object(self.vs_client, 'get' ) as mock_get_virtual_server, \ mock.patch.object(self.vs_client, 'update' ) as mock_update_virtual_server: mock_get_l7policy_binding.return_value = L7POLICY_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_get_virtual_server.return_value = vs_with_rules self.edge_driver.l7policy.update(self.context, self.l7policy, new_l7policy) mock_update_rule.assert_called_with(LB_RULE_ID, **rule_body) mock_update_virtual_server.assert_called_with( LB_VS_ID, rule_ids=['abc', LB_RULE_ID, 'xyz']) mock_successful_completion = ( self.lbv2_driver.l7policy.successful_completion) mock_successful_completion.assert_called_with(self.context, new_l7policy) def test_delete(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(self.vs_client, 'remove_rule' ) as mock_vs_remove_rule, \ mock.patch.object(self.rule_client, 'delete' ) as mock_delete_rule, \ mock.patch.object(nsx_db, 'delete_nsx_lbaas_l7policy_binding' ) as mock_delete_l7policy_binding: mock_get_l7policy_binding.return_value = L7POLICY_BINDING self.edge_driver.l7policy.delete(self.context, self.l7policy) mock_vs_remove_rule.assert_called_with(LB_VS_ID, LB_RULE_ID) mock_delete_rule.assert_called_with(LB_RULE_ID) mock_delete_l7policy_binding.assert_called_with( self.context.session, L7POLICY_ID) mock_successful_completion = ( self.lbv2_driver.l7policy.successful_completion) mock_successful_completion.assert_called_with( self.context, self.l7policy, delete=True) class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2L7Rule, self).setUp() @property def _tested_entity(self): return 'l7rule' def test_create(self): self.l7policy.rules = [self.l7rule] create_rule_body = { 'match_conditions': [{ 'type': 'LbHttpRequestHeaderCondition', 'match_type': 'EQUALS', 
'header_name': self.l7rule.key, 'header_value': self.l7rule.value}], 'actions': [{ 'type': 'LbSelectPoolAction', 'pool_id': LB_POOL_ID}], 'phase': 'HTTP_FORWARDING', 'match_strategy': 'ALL' } with mock.patch.object(nsx_db, 'get_nsx_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.rule_client, 'update' ) as mock_update_rule: mock_get_l7policy_binding.return_value = L7POLICY_BINDING mock_get_pool_binding.return_value = POOL_BINDING self.edge_driver.l7rule.create(self.context, self.l7rule) mock_update_rule.assert_called_with(LB_RULE_ID, **create_rule_body) mock_successful_completion = ( self.lbv2_driver.l7rule.successful_completion) mock_successful_completion.assert_called_with(self.context, self.l7rule, delete=False) def test_update(self): new_l7rule = lb_models.L7Rule(L7RULE_ID, LB_TENANT_ID, l7policy_id=L7POLICY_ID, compare_type='STARTS_WITH', invert=True, type='COOKIE', key='cookie1', value='xxxxx', policy=self.l7policy) self.l7policy.rules = [new_l7rule] update_rule_body = { 'match_conditions': [{ 'type': 'LbHttpRequestHeaderCondition', 'match_type': 'STARTS_WITH', 'header_name': 'Cookie', 'header_value': 'cookie1=xxxxx'}], 'actions': [{ 'type': 'LbSelectPoolAction', 'pool_id': LB_POOL_ID}], 'phase': 'HTTP_FORWARDING', 'match_strategy': 'ALL' } with mock.patch.object(nsx_db, 'get_nsx_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.rule_client, 'update' ) as mock_update_rule: mock_get_l7policy_binding.return_value = L7POLICY_BINDING mock_get_pool_binding.return_value = POOL_BINDING self.edge_driver.l7rule.update(self.context, self.l7rule, new_l7rule) mock_update_rule.assert_called_with(LB_RULE_ID, **update_rule_body) mock_successful_completion = ( self.lbv2_driver.l7rule.successful_completion) 
mock_successful_completion.assert_called_with(self.context, new_l7rule, delete=False) def test_delete(self): self.l7policy.rules = [self.l7rule] delete_rule_body = { 'match_conditions': [], 'actions': [{ 'type': 'LbSelectPoolAction', 'pool_id': LB_POOL_ID}], 'phase': 'HTTP_FORWARDING', 'match_strategy': 'ALL' } with mock.patch.object(nsx_db, 'get_nsx_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.rule_client, 'update' ) as mock_update_rule: mock_get_l7policy_binding.return_value = L7POLICY_BINDING mock_get_pool_binding.return_value = POOL_BINDING self.edge_driver.l7rule.delete(self.context, self.l7rule) mock_update_rule.assert_called_with(LB_RULE_ID, **delete_rule_body) mock_successful_completion = ( self.lbv2_driver.l7rule.successful_completion) mock_successful_completion.assert_called_with(self.context, self.l7rule, delete=True) vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/lbaas/__init__.py0000666000175100017510000000000013244523345025253 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/ipam/0000775000175100017510000000000013244524600023011 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/ipam/test_nsxv3_driver.py0000666000175100017510000001372013244523345027070 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import netaddr from oslo_config import cfg from oslo_utils import uuidutils from vmware_nsx.tests.unit.nsx_v3 import test_plugin from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants as error class MockIPPools(object): def patch_nsxlib_ipam(self): self.nsx_pools = {} def _create_pool(*args, **kwargs): pool_id = uuidutils.generate_uuid() gateway_ip = None if kwargs.get('gateway_ip'): gateway_ip = str(kwargs['gateway_ip']) subnet = {"allocation_ranges": kwargs.get('allocation_ranges'), "gateway_ip": gateway_ip, "cidr": args[0]} pool = {'id': pool_id, 'subnets': [subnet]} self.nsx_pools[pool_id] = {'pool': pool, 'allocated': []} return {'id': pool_id} def _update_pool(pool_id, **kwargs): pool = self.nsx_pools[pool_id]['pool'] subnet = pool['subnets'][0] if 'gateway_ip' in kwargs: if kwargs['gateway_ip']: subnet["gateway_ip"] = str(kwargs['gateway_ip']) else: del subnet["gateway_ip"] if 'allocation_ranges' in kwargs: if kwargs['allocation_ranges']: subnet["allocation_ranges"] = kwargs['allocation_ranges'] else: del subnet["allocation_ranges"] def _delete_pool(pool_id): del self.nsx_pools[pool_id] def _get_pool(pool_id): return self.nsx_pools[pool_id]['pool'] def _allocate_ip(*args, **kwargs): nsx_pool = self.nsx_pools[args[0]] if kwargs.get('ip_addr'): ip_addr = netaddr.IPAddress(kwargs['ip_addr']) # verify that this ip was not yet allocated if ip_addr in nsx_pool['allocated']: raise nsx_lib_exc.ManagerError( manager='dummy', operation='allocate', details='IP already allocated', error_code=error.ERR_CODE_IPAM_IP_ALLOCATED) # skip ip validation for this mock. 
nsx_pool['allocated'].append(ip_addr) return {'allocation_id': str(ip_addr)} # get an unused ip from the pool ranges = nsx_pool['pool']['subnets'][0]['allocation_ranges'] for ip_range in ranges: r = netaddr.IPRange(ip_range['start'], ip_range['end']) for ip_addr in r: if ip_addr not in nsx_pool['allocated']: nsx_pool['allocated'].append(ip_addr) return {'allocation_id': str(ip_addr)} # no IP was found raise nsx_lib_exc.ManagerError( manager='dummy', operation='allocate', details='All IPs in the pool are allocated', error_code=error.ERR_CODE_IPAM_POOL_EXHAUSTED) def _release_ip(*args, **kwargs): nsx_pool = self.nsx_pools[args[0]] ip_addr = netaddr.IPAddress(args[1]) nsx_pool['allocated'].remove(ip_addr) mock.patch( "vmware_nsxlib.v3.resources.IpPool.get", side_effect=_get_pool).start() mock.patch( "vmware_nsxlib.v3.resources.IpPool.create", side_effect=_create_pool).start() mock.patch( "vmware_nsxlib.v3.resources.IpPool.update", side_effect=_update_pool).start() mock.patch( "vmware_nsxlib.v3.resources.IpPool.delete", side_effect=_delete_pool).start() mock.patch( "vmware_nsxlib.v3.resources.IpPool.allocate", side_effect=_allocate_ip).start() mock.patch( "vmware_nsxlib.v3.resources.IpPool.release", side_effect=_release_ip).start() class TestNsxv3IpamSubnets(test_plugin.TestSubnetsV2, MockIPPools): """Run the nsxv3 plugin subnets tests with the ipam driver.""" def setUp(self): cfg.CONF.set_override( "ipam_driver", "vmware_nsx.services.ipam.nsx_v3.driver.Nsxv3IpamDriver") super(TestNsxv3IpamSubnets, self).setUp() self.patch_nsxlib_ipam() def test_subnet_update_ipv4_and_ipv6_pd_slaac_subnets(self): self.skipTest('Update ipam subnet is not supported') def test_subnet_update_ipv4_and_ipv6_pd_v6stateless_subnets(self): self.skipTest('Update ipam subnet is not supported') class TestNsxv3IpamPorts(test_plugin.TestPortsV2, MockIPPools): """Run the nsxv3 plugin ports tests with the ipam driver.""" def setUp(self): cfg.CONF.set_override( "ipam_driver", 
"vmware_nsx.services.ipam.nsx_v3.driver.Nsxv3IpamDriver") super(TestNsxv3IpamPorts, self).setUp() self.patch_nsxlib_ipam() def test_create_port_invalid_fixed_ip_address_v6_pd_slaac(self): self.skipTest('Update ipam subnet is not supported') def test_update_port_invalid_subnet_v6_pd_slaac(self): self.skipTest('Update ipam subnet is not supported') def test_update_port_update_ip_address_only(self): self.skipTest('Update ipam subnet is not supported') def test_update_port_invalid_fixed_ip_address_v6_pd_slaac(self): self.skipTest('Update ipam subnet is not supported') vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/ipam/test_nsxv_driver.py0000666000175100017510000001221113244523345026777 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from vmware_nsx.tests.unit.nsx_v import test_plugin from neutron_lib.api.definitions import provider_net as pnet class TestNsxvIpamSubnets(test_plugin.TestSubnetsV2): """Run the nsxv plugin subnets tests with the ipam driver""" def setUp(self): cfg.CONF.set_override( "ipam_driver", "vmware_nsx.services.ipam.nsx_v.driver.NsxvIpamDriver") super(TestNsxvIpamSubnets, self).setUp() def provider_net(self): name = 'dvs-provider-net' providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 43, pnet.PHYSICAL_NETWORK: 'dvs-uuid'} return self.network(name=name, do_delete=False, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) def test_provider_net_use_driver(self): with self.provider_net() as net: before = len(self.fc2._ipam_pools) with self.subnet(network=net, cidr='10.10.10.0/29', enable_dhcp=False): self.assertEqual(before + 1, len(self.fc2._ipam_pools)) def test_ext_net_use_driver(self): with self.network(router__external=True) as net: before = len(self.fc2._ipam_pools) with self.subnet(network=net, cidr='10.10.10.0/29', enable_dhcp=False): self.assertEqual(before + 1, len(self.fc2._ipam_pools)) def test_regular_net_dont_use_driver(self): with self.network() as net: before = len(self.fc2._ipam_pools) with self.subnet(network=net, cidr='10.10.10.0/29', enable_dhcp=False): self.assertEqual(before, len(self.fc2._ipam_pools)) def test_no_more_ips(self): # create a small provider network, and use all the IPs with self.provider_net() as net: with self.subnet(network=net, cidr='10.10.10.0/29', enable_dhcp=False) as subnet: # create ports on this subnet until there are no more free ips # legal ips are 10.10.10.2 - 10.10.10.6 fixed_ips = [{'subnet_id': subnet['subnet']['id']}] for counter in range(5): port_res = self._create_port( self.fmt, net['network']['id'], fixed_ips=fixed_ips) port = self.deserialize('json', port_res) self.assertIn('port', port) # try to create another one - 
should fail port_res = self._create_port( self.fmt, net['network']['id'], fixed_ips=fixed_ips) port = self.deserialize('json', port_res) self.assertIn('NeutronError', port) self.assertIn('message', port['NeutronError']) self.assertTrue(('No more IP addresses available' in port['NeutronError']['message'])) def test_use_same_ips(self): # create a provider network and try to allocate the same ip twice with self.provider_net() as net: with self.subnet(network=net, cidr='10.10.10.0/24', enable_dhcp=False) as subnet: fixed_ips = [{'ip_address': '10.10.10.2', 'subnet_id': subnet['subnet']['id']}] # First port should succeed port_res = self._create_port( self.fmt, net['network']['id'], fixed_ips=fixed_ips) port = self.deserialize('json', port_res) self.assertIn('port', port) # try to create another one - should fail port_res = self._create_port( self.fmt, net['network']['id'], fixed_ips=fixed_ips) port = self.deserialize('json', port_res) self.assertIn('NeutronError', port) self.assertIn('message', port['NeutronError']) self.assertTrue(('already allocated in subnet' in port['NeutronError']['message'])) class TestNsxvIpamPorts(test_plugin.TestPortsV2): """Run the nsxv plugin ports tests with the ipam driver""" def setUp(self): cfg.CONF.set_override( "ipam_driver", "vmware_nsx.services.ipam.nsx_v.driver.NsxvIpamDriver") super(TestNsxvIpamPorts, self).setUp() vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/ipam/__init__.py0000666000175100017510000000000013244523345025117 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/l2gateway/0000775000175100017510000000000013244524600023762 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/l2gateway/test_nsxv3_driver.py0000666000175100017510000003035013244523345030037 0ustar zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from networking_l2gw.db.l2gateway import l2gateway_db from networking_l2gw.services.l2gateway.common import config from networking_l2gw.services.l2gateway.common import constants from networking_l2gw.services.l2gateway import exceptions as l2gw_exc from networking_l2gw.services.l2gateway import plugin as core_l2gw_plugin from networking_l2gw.tests.unit.db import test_l2gw_db from oslo_config import cfg from oslo_utils import importutils from oslo_utils import uuidutils from neutron.tests import base from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context from neutron_lib import exceptions as n_exc from vmware_nsx.services.l2gateway.nsx_v3 import driver as nsx_v3_driver from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsx_v3_plugin from vmware_nsxlib.tests.unit.v3 import mocks as nsx_v3_mocks from vmware_nsxlib.v3 import nsx_constants NSX_V3_PLUGIN_CLASS = ('vmware_nsx.plugins.nsx_v3.plugin.NsxV3Plugin') NSX_V3_L2GW_DRIVER_CLASS_PATH = ('vmware_nsx.services.l2gateway.' 
'nsx_v3.driver.NsxV3Driver') class TestNsxV3L2GatewayDriver(test_l2gw_db.L2GWTestCase, test_nsx_v3_plugin.NsxV3PluginTestCaseMixin, base.BaseTestCase): def setUp(self): super(TestNsxV3L2GatewayDriver, self).setUp() self.core_plugin = importutils.import_object(NSX_V3_PLUGIN_CLASS) self.driver = nsx_v3_driver.NsxV3Driver(mock.MagicMock()) mock.patch.object(config, 'register_l2gw_opts_helper') mock.patch('neutron.services.service_base.load_drivers', return_value=({'dummyprovider': self.driver}, 'dummyprovider')).start() mock.patch.object(l2gateway_db.L2GatewayMixin, '__init__'), mock.patch.object(l2gateway_db, 'subscribe') mock.patch('neutron.db.servicetype_db.ServiceTypeManager.get_instance', return_value=mock.MagicMock()).start() self.l2gw_plugin = core_l2gw_plugin.L2GatewayPlugin() self.context = context.get_admin_context() def _get_nw_data(self): net_data = super(TestNsxV3L2GatewayDriver, self)._get_nw_data() net_data['network']['port_security_enabled'] = True return net_data def test_nsxl2gw_driver_init(self): with mock.patch.object(nsx_v3_driver.NsxV3Driver, 'subscribe_callback_notifications') as sub: with mock.patch.object(nsx_v3_driver.LOG, 'debug') as debug: nsx_v3_driver.NsxV3Driver(mock.MagicMock()) self.assertTrue(sub.called) self.assertTrue(debug.called) def test_create_default_l2_gateway(self): def_bridge_cluster_name = nsx_v3_mocks.NSX_BRIDGE_CLUSTER_NAME cfg.CONF.set_override("default_bridge_cluster", def_bridge_cluster_name, "nsx_v3") nsx_v3_driver.NsxV3Driver(mock.MagicMock()) # fake the callback invoked after init registry.publish(resources.PROCESS, events.BEFORE_SPAWN, mock.MagicMock()) l2gws = self.driver._get_l2_gateways(self.context) def_bridge_cluster_id = ( self.nsxlib.bridge_cluster.get_id_by_name_or_id( def_bridge_cluster_name)) def_l2gw = None for l2gw in l2gws: for device in l2gw['devices']: if device['device_name'] == def_bridge_cluster_id: def_l2gw = l2gw self.assertIsNotNone(def_l2gw) self.assertTrue(def_l2gw.devices[0].device_name, 
def_bridge_cluster_id) self.assertTrue(def_l2gw.devices[0].interfaces[0].interface_name, 'default-bridge-cluster') def test_create_duplicate_default_l2_gateway_noop(self): def_bridge_cluster_name = nsx_v3_mocks.NSX_BRIDGE_CLUSTER_NAME cfg.CONF.set_override("default_bridge_cluster", def_bridge_cluster_name, "nsx_v3") for i in range(0, 2): nsx_v3_driver.NsxV3Driver(mock.MagicMock()) # fake the callback invoked after init registry.publish(resources.PROCESS, events.BEFORE_SPAWN, mock.MagicMock()) l2gws = self.driver._get_l2_gateways(self.context) # Verify whether only one default L2 gateway is created self.assertEqual(1, len(l2gws)) def test_create_default_l2_gateway_no_bc_uuid_noop(self): with mock.patch.object(nsx_v3_driver.NsxV3Driver, 'subscribe_callback_notifications'): nsx_v3_driver.NsxV3Driver(mock.MagicMock()) l2gws = self.driver._get_l2_gateways(self.context) # Verify no default L2 gateway is created if bridge cluster id is # not configured in nsx.ini self.assertEqual([], l2gws) def test_create_l2_gateway_multiple_devices_fail(self): invalid_l2gw_dict = { "l2_gateway": { "tenant_id": "fake_tenant_id", "name": "invalid_l2gw", "devices": [{"interfaces": [{"name": "interface1"}], "device_name": "device1"}, {"interfaces": [{"name": "interface_2"}], "device_name": "device2"}]}} self.assertRaises(n_exc.InvalidInput, self.l2gw_plugin.create_l2_gateway, self.context, invalid_l2gw_dict) def test_create_l2_gateway_multiple_interfaces_fail(self): invalid_l2gw_dict = { "l2_gateway": { "tenant_id": "fake_tenant_id", "name": "invalid_l2gw", "devices": [{"interfaces": [{"name": "interface1"}, {"name": "interface2"}], "device_name": "device1"}]}} self.assertRaises(n_exc.InvalidInput, self.l2gw_plugin.create_l2_gateway, self.context, invalid_l2gw_dict) def test_create_l2_gateway_invalid_device_name_fail(self): invalid_l2gw_dict = { "l2_gateway": { "tenant_id": "fake_tenant_id", "name": "invalid_l2gw", "devices": [{"interfaces": [{"name": "interface_1"}], "device_name": 
"device-1"}]}} self.assertRaises(n_exc.InvalidInput, self.l2gw_plugin.create_l2_gateway, self.context, invalid_l2gw_dict) def test_create_l2_gateway_valid(self): bc_uuid = uuidutils.generate_uuid() l2gw_data = self._get_l2_gateway_data(name='gw1', device_name=bc_uuid) l2gw = self.l2gw_plugin.create_l2_gateway(self.context, l2gw_data) self.assertIsNotNone(l2gw) self.assertEqual("gw1", l2gw["name"]) self.assertEqual("port1", l2gw["devices"][0]["interfaces"][0]["name"]) self.assertEqual(bc_uuid, l2gw["devices"][0]["device_name"]) def test_create_l2_gateway_connection(self): type(self.driver)._core_plugin = self.core_plugin bc_uuid = uuidutils.generate_uuid() l2gw_data = self._get_l2_gateway_data(name='def-l2gw', device_name=bc_uuid) l2gw = self._create_l2gateway(l2gw_data) net_data = self._get_nw_data() net = self.core_plugin.create_network(self.context, net_data) l2gw_conn_data = {constants.CONNECTION_RESOURCE_NAME: { 'l2_gateway_id': l2gw['id'], 'tenant_id': 'fake_tenant_id', 'network_id': net['id']}} l2gw_conn = self.l2gw_plugin.create_l2_gateway_connection( self.context, l2gw_conn_data) self.assertIsNotNone(l2gw_conn) self.assertEqual(net['id'], l2gw_conn['network_id']) self.assertEqual(l2gw['id'], l2gw_conn['l2_gateway_id']) def test_delete_l2_gateway_connection(self): type(self.driver)._core_plugin = self.core_plugin bc_uuid = uuidutils.generate_uuid() l2gw_data = self._get_l2_gateway_data(name='def-l2gw', device_name=bc_uuid) l2gw = self._create_l2gateway(l2gw_data) net_data = self._get_nw_data() net = self.core_plugin.create_network(self.context, net_data) l2gw_conn_data = {constants.CONNECTION_RESOURCE_NAME: { 'l2_gateway_id': l2gw['id'], 'tenant_id': 'fake_tenant_id', 'network_id': net['id']}} l2gw_conn = self.l2gw_plugin.create_l2_gateway_connection( self.context, l2gw_conn_data) self.l2gw_plugin.delete_l2_gateway_connection(self.context, l2gw_conn['id']) # Verify that the L2 gateway connection was deleted 
self.assertRaises(l2gw_exc.L2GatewayConnectionNotFound, self.l2gw_plugin.get_l2_gateway_connection, self.context, l2gw_conn['id']) ports = self.core_plugin.get_ports(self.context) # Verify that the L2 gateway connection port was cleaned up self.assertEqual(0, len(ports)) def test_create_l2_gateway_connection_creates_port(self): type(self.driver)._core_plugin = self.core_plugin bc_uuid = uuidutils.generate_uuid() l2gw_data = self._get_l2_gateway_data(name='def-l2gw', device_name=bc_uuid) l2gw = self._create_l2gateway(l2gw_data) net_data = self._get_nw_data() net = self.core_plugin.create_network(self.context, net_data) l2gw_conn_data = { 'id': uuidutils.generate_uuid(), 'l2_gateway_id': l2gw['id'], 'tenant_id': 'fake_tenant_id', 'network_id': net['id']} self.driver.create_l2_gateway_connection_postcommit(self.context, l2gw_conn_data) ports = self.core_plugin.get_ports(self.context) # Verify that the L2 gateway connection port was created with device # owner BRIDGEENDPOINT self.assertEqual(1, len(ports)) port = ports[0] self.assertEqual(nsx_constants.BRIDGE_ENDPOINT, port['device_owner']) # Verify that the L2 gateway connection port was created with no # fixed ips self.assertEqual(0, len(port.get('fixed_ips'))) def test_core_plugin_delete_l2_gateway_connection_port_fail(self): type(self.driver)._core_plugin = self.core_plugin bc_uuid = uuidutils.generate_uuid() l2gw_data = self._get_l2_gateway_data(name='def-l2gw', device_name=bc_uuid) l2gw = self._create_l2gateway(l2gw_data) net_data = self._get_nw_data() net = self.core_plugin.create_network(self.context, net_data) l2gw_conn_data = { 'id': uuidutils.generate_uuid(), 'l2_gateway_id': l2gw['id'], 'tenant_id': 'fake_tenant_id', 'network_id': net['id']} self.driver.create_l2_gateway_connection_postcommit(self.context, l2gw_conn_data) port = self.core_plugin.get_ports(self.context)[0] self.assertRaises(n_exc.ServicePortInUse, self.core_plugin.delete_port, self.context, port['id']) 
vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/l2gateway/test_nsxv_driver.py0000666000175100017510000002462613244523345027765 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.tests import base from networking_l2gw.db.l2gateway import l2gateway_db from neutron_lib import context from neutron_lib import exceptions as n_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import nsxv_db from vmware_nsx.dvs import dvs_utils from vmware_nsx.services.l2gateway.nsx_v import driver as nsx_v_driver from vmware_nsx.tests.unit.nsx_v import test_plugin CORE_PLUGIN = "vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2" class TestL2gatewayDriver(base.BaseTestCase): def setUp(self): super(TestL2gatewayDriver, self).setUp() self.context = context.get_admin_context() self.plugin = nsx_v_driver.NsxvL2GatewayDriver(mock.MagicMock()) def test_validate_device_with_multi_devices(self): fake_l2gw_dict = {"l2_gateway": {"tenant_id": "fake__tenant_id", "name": "fake_l2gw", "devices": [{"interfaces": [{"name": "fake_inter"}], "device_name": "fake_dev"}, {"interfaces": [{"name": "fake_inter_1"}], "device_name": "fake_dev_1"}]}} with mock.patch.object(l2gateway_db.L2GatewayMixin, '_admin_check'): self.assertRaises(n_exc.InvalidInput, self.plugin.create_l2_gateway, self.context, fake_l2gw_dict) def test_validate_interface_with_multi_interfaces(self): fake_l2gw_dict = {"l2_gateway": 
{"tenant_id": "fake_tenant_id", "name": "fake_l2gw", "devices": [{"interfaces": [{"name": "fake_inter_1"}, {"name": "fake_inter_2"}], "device_name": "fake_dev"}]}} with mock.patch.object(l2gateway_db.L2GatewayMixin, '_admin_check'): self.assertRaises(n_exc.InvalidInput, self.plugin.create_l2_gateway, self.context, fake_l2gw_dict) @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._nsxv') def test_validate_interface_with_invalid_interfaces(self, _nsxv): fake_interfaces = [{"name": "fake_inter"}] _nsxv.vcns.validate_network.return_value = False self.assertRaises(n_exc.InvalidInput, self.plugin._validate_interface_list, self.context, fake_interfaces) @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._edge_manager') def test_create_gw_edge_failure(self, edge_manager): with mock.patch.object(nsxv_db, 'get_nsxv_router_binding', return_value=None): self.assertRaises(nsx_exc.NsxL2GWDeviceNotFound, self.plugin._create_l2_gateway_edge, self.context) @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' 'L2GatewayMixin._admin_check') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._validate_device_list') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._validate_interface_list') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._create_l2_gateway_edge') @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' 
'L2GatewayMixin.create_l2_gateway') def test_create_l2_gateway_failure(self, create_l2gw, _create_l2gw_edge, val_inter, val_dev, _admin_check): fake_l2gw_dict = {"l2_gateway": {"tenant_id": "fake_teannt_id", "name": "fake_l2gw", "devices": [{"interfaces": [{"name": "fake_inter"}], "device_name": "fake_dev"}]}} _create_l2gw_edge.side_effect = nsx_exc.NsxL2GWDeviceNotFound self.assertRaises(nsx_exc.NsxL2GWDeviceNotFound, self.plugin.create_l2_gateway, self.context, fake_l2gw_dict) @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' 'L2GatewayMixin._admin_check') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._validate_device_list') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._validate_interface_list') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._create_l2_gateway_edge') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._edge_manager') def test_create_l2_gateway(self, edge_manager, _create_l2gw_edge, val_inter, val_dev, _admin_check): fake_l2gw_dict = {"l2_gateway": {"tenant_id": "fake_teannt_id", "name": "fake_l2gw", "devices": [{"interfaces": [{"name": "fake_inter"}], "device_name": "fake_dev"}]}} fake_devices = [{"interfaces": [{"name": "fake_inter"}], "device_name": "fake_dev"}] fake_interfaces = [{"name": "fake_inter"}] _create_l2gw_edge.return_value = 'fake_dev' self.plugin.create_l2_gateway(self.context, fake_l2gw_dict) _admin_check.assert_called_with(self.context, 'CREATE') val_dev.assert_called_with(fake_devices) val_inter.assert_called_with(self.context, fake_interfaces) @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' 'L2GatewayMixin._admin_check') @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' 'L2GatewayMixin.get_l2_gateway_connection') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._get_device') @mock.patch('vmware_nsx.services.l2gateway.' 
'nsx_v.driver.NsxvL2GatewayDriver._nsxv') def test_delete_l2_gateway_connection(self, nsxv, get_devices, get_conn, admin_check): fake_conn_dict = {'l2_gateway_id': 'fake_l2gw_id'} fake_device_dict = {'id': 'fake_dev_id', 'device_name': 'fake_dev_name'} get_conn.return_value = fake_conn_dict get_devices.return_value = fake_device_dict self.plugin.delete_l2_gateway_connection(self.context, fake_conn_dict) admin_check.assert_called_with(self.context, 'DELETE') get_conn.assert_called_with(self.context, fake_conn_dict) get_devices.assert_called_with(self.context, 'fake_l2gw_id') self.plugin._nsxv().del_bridge.asert_called_with('fake_dev_name') @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' 'L2GatewayMixin._admin_check') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._get_device') @mock.patch('vmware_nsx.db.' 'nsxv_db.get_nsxv_router_binding_by_edge') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._edge_manager') def test_delete_l2_gateway(self, edge_manager, get_nsxv_router, get_devices, admin_check): fake_device_dict = {"id": "fake_dev_id", "device_name": "fake_edge_name", "l2_gateway_id": "fake_l2gw_id"} fake_rtr_binding = {"router_id": 'fake_router_id'} get_devices.return_value = fake_device_dict get_nsxv_router.return_value = fake_rtr_binding self.plugin.delete_l2_gateway(self.context, 'fake_l2gw_id') admin_check.assert_called_with(self.context, 'DELETE') get_devices.assert_called_with(self.context, 'fake_l2gw_id') get_nsxv_router.assert_called_with(self.context.session, "fake_edge_name") class TestL2GatewayDriverRouter(test_plugin.NsxVPluginV2TestCase): @mock.patch.object(dvs_utils, 'dvs_create_session') def setUp(self, *mocks): # init the nsxv plugin, edge manager and fake vcns super(TestL2GatewayDriverRouter, self).setUp(plugin=CORE_PLUGIN, ext_mgr=None) self.context = context.get_admin_context() # init the L2 gateway driver self.driver = nsx_v_driver.NsxvL2GatewayDriver(mock.MagicMock()) 
@mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._validate_device_list') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._validate_interface_list') def test_create_l2_gateway_router(self, val_inter, val_dev): # Verify that creating the router doesn't fail fake_l2gw_dict = {"l2_gateway": {"tenant_id": "fake_teannt_id", "name": "fake_l2gw", "devices": [{"interfaces": [{"name": "fake_inter"}], "device_name": "fake_dev"}]}} self.driver.create_l2_gateway(self.context, fake_l2gw_dict) def test_create_l2_gateway_router_edge(self): # Verify that the router edge is really created edge_id = self.driver._create_l2_gateway_edge(self.context) self.assertEqual('edge-1', edge_id) vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/l2gateway/__init__.py0000666000175100017510000000000013244523345026070 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/__init__.py0000666000175100017510000000000013244523345024171 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/dynamic_routing/0000775000175100017510000000000013244524600025256 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/dynamic_routing/test_nsxv_bgp_driver.py0000666000175100017510000003074713244523345032112 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import mock from neutron.api import extensions from neutron_dynamic_routing.db import bgp_db # noqa from neutron_dynamic_routing import extensions as dr_extensions from neutron_dynamic_routing.extensions import bgp as ext_bgp from neutron_dynamic_routing.tests.unit.db import test_bgp_db from neutron_lib.api.definitions import address_scope from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from vmware_nsx.common import exceptions as exc from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.drivers import ( shared_router_driver as router_driver) from vmware_nsx.services.dynamic_routing import bgp_plugin from vmware_nsx.services.dynamic_routing.nsx_v import driver as bgp_driver from vmware_nsx.tests.unit.nsx_v import test_plugin BGP_PLUGIN = 'vmware_nsx.services.dynamic_routing.bgp_plugin.NSXvBgpPlugin' class TestNSXvBgpPlugin(test_plugin.NsxVPluginV2TestCase, test_bgp_db.BgpTests): def setUp(self): extensions.append_api_extensions_path(dr_extensions.__path__) service_plugins = {ext_bgp.BGP_EXT_ALIAS: BGP_PLUGIN} super(TestNSXvBgpPlugin, self).setUp(service_plugins=service_plugins) self.bgp_plugin = bgp_plugin.NSXvBgpPlugin() self.nsxv_driver = self.bgp_plugin.drivers['nsx-v'] self.nsxv_driver._validate_gateway_network = mock.Mock() self.nsxv_driver._validate_bgp_configuration_on_peer_esg = ( mock.Mock()) self.plugin = directory.get_plugin() self.l3plugin = self.plugin self.plugin.init_is_complete = True self.context = context.get_admin_context() self.project_id = 'dummy_project' @contextlib.contextmanager def gw_network(self, external=True, **kwargs): with super(TestNSXvBgpPlugin, self).gw_network(external=external, **kwargs) as gw_net: if external: gw_net['network']['router:external'] = True gw_net['network'][address_scope.IPV4_ADDRESS_SCOPE] = True yield gw_net @contextlib.contextmanager def subnet(self, network=None, **kwargs): if network and 
network['network'].get('router:external'): kwargs['gateway_ip'] = None kwargs['enable_dhcp'] = False with super(TestNSXvBgpPlugin, self).subnet(network=network, **kwargs) as sub: yield sub @contextlib.contextmanager def router(self, **kwargs): if 'external_gateway_info' in kwargs: kwargs['external_gateway_info']['enable_snat'] = False with super(TestNSXvBgpPlugin, self).router(**kwargs) as r: yield r @contextlib.contextmanager def esg_bgp_peer(self, esg_id): data = {'name': '', 'peer_ip': '192.168.1.10', 'remote_as': '65000', 'esg_id': esg_id, 'auth_type': 'none', 'password': '', 'tenant_id': self.project_id} bgp_peer = self.bgp_plugin.create_bgp_peer(self.context, {'bgp_peer': data}) yield bgp_peer self.bgp_plugin.delete_bgp_peer(self.context, bgp_peer['id']) @contextlib.contextmanager def bgp_speaker(self, ip_version, local_as, name='my-speaker', advertise_fip_host_routes=True, advertise_tenant_networks=True, networks=None, peers=None): data = {'ip_version': ip_version, test_bgp_db.ADVERTISE_FIPS_KEY: advertise_fip_host_routes, 'advertise_tenant_networks': advertise_tenant_networks, 'local_as': local_as, 'name': name, 'tenant_id': self.project_id} bgp_speaker = self.bgp_plugin.create_bgp_speaker(self.context, {'bgp_speaker': data}) bgp_speaker_id = bgp_speaker['id'] if networks: for network_id in networks: self.bgp_plugin.add_gateway_network( self.context, bgp_speaker_id, {'network_id': network_id}) if peers: for peer_id in peers: self.bgp_plugin.add_bgp_peer(self.context, bgp_speaker_id, {'bgp_peer_id': peer_id}) yield self.bgp_plugin.get_bgp_speaker(self.context, bgp_speaker_id) def test_create_v6_bgp_speaker(self): fake_bgp_speaker = { "bgp_speaker": { "ip_version": 6, "local_as": "1000", "name": "bgp-speaker", "tenant_id": self.project_id } } self.assertRaises(n_exc.InvalidInput, self.bgp_plugin.create_bgp_speaker, self.context, fake_bgp_speaker) def test_create_v6_bgp_peer(self): fake_bgp_peer = { "bgp_peer": { "auth_type": "none", "remote_as": "1000", 
"name": "bgp-peer", "peer_ip": "fc00::/7", "tenant_id": self.project_id } } self.assertRaises(n_exc.InvalidInput, self.bgp_plugin.create_bgp_peer, self.context, fake_bgp_peer) def test_bgp_peer_esg_id(self): edge_id = 'edge-123' with self.esg_bgp_peer(esg_id='edge-123') as esg_peer: self.assertEqual(edge_id, esg_peer['esg_id']) peer_id = esg_peer['id'] bgp_peer = self.bgp_plugin.get_bgp_peer(self.context, peer_id) self.assertEqual(edge_id, bgp_peer['esg_id']) def test_create_bgp_peer_md5_auth_no_password(self): bgp_peer = {'bgp_peer': {'auth_type': 'md5', 'password': None, 'peer_ip': '10.0.0.3', 'tenant_id': self.project_id}} self.assertRaises(ext_bgp.InvalidBgpPeerMd5Authentication, self.bgp_plugin.create_bgp_peer, self.context, bgp_peer) def test_add_non_external_gateway_network(self): self.nsxv_driver._validate_gateway_network = ( bgp_driver.NSXvBgpDriver( self.bgp_plugin)._validate_gateway_network) with self.gw_network(external=False) as net,\ self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: network_id = net['network']['id'] with self.bgp_speaker(sp['ip_version'], 1234) as speaker: self.assertRaises(exc.NsxBgpNetworkNotExternal, self.bgp_plugin.add_gateway_network, self.context, speaker['id'], {'network_id': network_id}) @mock.patch.object(nsxv_db, 'get_nsxv_bgp_speaker_binding', return_value={'bgp_identifier': '10.0.0.11'}) def test_shared_router_on_gateway_clear(self, m1): with self.gw_network(external=True) as net,\ self.subnetpool_with_address_scope(4, prefixes=['10.0.0.0/24']) as sp: with self.subnet(network=net, subnetpool_id=sp['id']) as s1,\ self.bgp_speaker(sp['ip_version'], 1234, networks=[net['network']['id']]): subnet_id = s1['subnet']['id'] gw_info1 = {'network_id': net['network']['id'], 'external_fixed_ips': [{'ip_address': '10.0.0.11', 'subnet_id': subnet_id}]} gw_info2 = {'network_id': net['network']['id'], 'external_fixed_ips': [{'ip_address': '10.0.0.12', 'subnet_id': subnet_id}]} router_obj = 
router_driver.RouterSharedDriver(self.plugin) with mock.patch.object(self.plugin, '_find_router_driver', return_value=router_obj): with self.router(external_gateway_info=gw_info1) as rtr1,\ self.router(external_gateway_info=gw_info2) as rtr2,\ mock.patch.object( self.nsxv_driver, '_get_router_edge_info', return_value=('edge-1', False)),\ mock.patch.object( self.plugin.edge_manager, 'get_routers_on_same_edge', return_value=[rtr1['id'], rtr2['id']]),\ mock.patch.object( self.nsxv_driver, '_update_edge_bgp_identifier') as up_bgp: gw_clear = {u'router': {u'external_gateway_info': {}}} self.plugin.update_router(self.context, rtr1['id'], gw_clear) up_bgp.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, '10.0.0.12') def test__bgp_speakers_for_gateway_network_by_ip_version(self): # REVISIT(roeyc): Base class test use ipv6 which is not supported. pass def test__bgp_speakers_for_gateway_network_by_ip_version_no_binding(self): # REVISIT(roeyc): Base class test use ipv6 which is not supported. pass def test__tenant_prefixes_by_router_no_gateway_port(self): # REVISIT(roeyc): Base class test use ipv6 which is not supported. pass def test_all_routes_by_bgp_speaker_different_tenant_address_scope(self): # REVISIT(roeyc): Base class test use ipv6 which is not supported. 
pass def test__get_address_scope_ids_for_bgp_speaker(self): pass def test__get_dvr_fip_host_routes_by_binding(self): pass def test__get_dvr_fip_host_routes_by_router(self): pass def test__get_fip_next_hop_dvr(self): pass def test__get_fip_next_hop_legacy(self): pass def test_get_routes_by_bgp_speaker_id_with_fip_dvr(self): pass def test_ha_router_fips_has_no_next_hop_to_fip_agent_gateway(self): pass def test_legacy_router_fips_has_no_next_hop_to_fip_agent_gateway(self): pass def test_floatingip_update_callback(self): pass def test_get_ipv6_tenant_subnet_routes_by_bgp_speaker_ipv6(self): pass def test_get_routes_by_bgp_speaker_id_with_fip(self): # base class tests uses no-snat router with floating ips self.skipTest('No SNAT with floating ips not supported') def test_get_routes_by_bgp_speaker_binding_with_fip(self): # base class tests uses no-snat router with floating ips self.skipTest('No SNAT with floating ips not supported') def test__get_routes_by_router_with_fip(self): # base class tests uses no-snat router with floating ips self.skipTest('No SNAT with floating ips not supported') def test_add_bgp_peer_with_bad_id(self): with self.subnetpool_with_address_scope( 4, prefixes=['8.0.0.0/8']) as sp: with self.bgp_speaker(sp['ip_version'], 1234) as speaker: self.assertRaises(ext_bgp.BgpPeerNotFound, self.bgp_plugin.add_bgp_peer, self.context, speaker['id'], {'bgp_peer_id': 'aaa'}) vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/dynamic_routing/__init__.py0000666000175100017510000000000013244523345027364 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/qos/0000775000175100017510000000000013244524600022665 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/qos/__init__.py0000666000175100017510000000000013244523345024773 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/qos/test_nsxv3_notification.py0000666000175100017510000004133013244523345030135 0ustar zuulzuul00000000000000# 
Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import context from oslo_config import cfg from oslo_utils import uuidutils from neutron.common import exceptions from neutron.objects import base as base_object from neutron.objects.qos import policy as policy_object from neutron.objects.qos import rule as rule_object from neutron.services.qos import qos_plugin from neutron.tests.unit.services.qos import base from vmware_nsx.db import db as nsx_db from vmware_nsx.plugins.nsx_v3 import utils as v3_utils from vmware_nsx.services.qos.nsx_v3 import driver as qos_driver from vmware_nsx.services.qos.nsx_v3 import utils as qos_utils from vmware_nsx.tests.unit.nsx_v3 import test_plugin PLUGIN_NAME = 'vmware_nsx.plugins.nsx_v3.plugin.NsxV3Plugin' class TestQosNsxV3Notification(base.BaseQosTestCase, test_plugin.NsxV3PluginTestCaseMixin): def setUp(self): # Reset the drive to re-create it qos_driver.DRIVER = None super(TestQosNsxV3Notification, self).setUp() self.setup_coreplugin(PLUGIN_NAME) self.qos_plugin = qos_plugin.QoSPlugin() self.ctxt = context.Context('fake_user', 'fake_tenant') mock.patch.object(self.ctxt.session, 'refresh').start() mock.patch.object(self.ctxt.session, 'expunge').start() self.policy_data = { 'policy': {'id': uuidutils.generate_uuid(), 'project_id': uuidutils.generate_uuid(), 'name': 'test-policy', 'description': 'Test policy description', 'shared': True}} self.rule_data = { 
'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(), 'max_kbps': 2000, 'max_burst_kbps': 150}} self.ingress_rule_data = { 'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(), 'max_kbps': 3000, 'max_burst_kbps': 350, 'direction': 'ingress'}} self.dscp_rule_data = { 'dscp_marking_rule': {'id': uuidutils.generate_uuid(), 'dscp_mark': 22}} self.policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) # egress BW limit rule self.rule = rule_object.QosBandwidthLimitRule( self.ctxt, **self.rule_data['bandwidth_limit_rule']) # ingress bw limit rule self.ingress_rule = rule_object.QosBandwidthLimitRule( self.ctxt, **self.ingress_rule_data['bandwidth_limit_rule']) self.dscp_rule = rule_object.QosDscpMarkingRule( self.ctxt, **self.dscp_rule_data['dscp_marking_rule']) self.fake_profile_id = 'fake_profile' self.fake_profile = {'id': self.fake_profile_id} mock.patch('neutron.objects.db.api.create_object').start() mock.patch('neutron.objects.db.api.update_object').start() mock.patch('neutron.objects.db.api.delete_object').start() mock.patch.object(nsx_db, 'get_switch_profile_by_qos_policy', return_value=self.fake_profile_id).start() self.peak_bw_multiplier = cfg.CONF.NSX.qos_peak_bw_multiplier self.nsxlib = v3_utils.get_nsxlib_wrapper() @mock.patch( 'neutron.objects.rbac_db.RbacNeutronDbObjectMixin' '.create_rbac_policy') @mock.patch.object(nsx_db, 'add_qos_policy_profile_mapping') def test_policy_create_profile(self, fake_db_add, fake_rbac_create): # test the switch profile creation when a QoS policy is created with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.create', return_value=self.fake_profile ) as create_profile: with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): with mock.patch('neutron.objects.qos.policy.QosPolicy.create'): policy = self.qos_plugin.create_policy(self.ctxt, self.policy_data) expected_tags = self.nsxlib.build_v3_tags_payload( policy, 
resource_type='os-neutron-qos-id', project_name=self.ctxt.tenant_name) create_profile.assert_called_once_with( description=self.policy_data["policy"]["description"], name=self.policy_data["policy"]["name"], tags=expected_tags) # verify that the policy->profile mapping entry was added self.assertTrue(fake_db_add.called) @mock.patch( 'neutron.objects.rbac_db.RbacNeutronDbObjectMixin' '.create_rbac_policy') def __test_policy_update_profile(self, *mocks): # test the switch profile update when a QoS policy is updated fields = base_object.get_updatable_fields( policy_object.QosPolicy, self.policy_data['policy']) with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.update' ) as update_profile: with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): with mock.patch('neutron.objects.qos.policy.QosPolicy.update'): self.qos_plugin.update_policy( self.ctxt, self.policy.id, {'policy': fields}) # verify that the profile was updated with the correct data self.policy_data["policy"]["id"] = self.policy.id expected_tags = self.nsxlib.build_v3_tags_payload( self.policy_data["policy"], resource_type='os-neutron-qos-id', project_name=self.ctxt.tenant_name) update_profile.assert_called_once_with( self.fake_profile_id, description=self.policy_data["policy"]["description"], name=self.policy_data["policy"]["name"], tags=expected_tags ) @mock.patch.object(policy_object.QosPolicy, '_reload_rules') def test_bw_rule_create_profile(self, *mocks): # test the switch profile update when a egress QoS BW rule is created _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [self.rule]) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.' 
'set_profile_shaping' ) as update_profile: with mock.patch('neutron.objects.db.api.update_object', return_value=self.rule_data): self.qos_plugin.update_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, _policy.id, self.rule_data) # validate the data on the profile rule_dict = self.rule_data['bandwidth_limit_rule'] expected_bw = int(round(float( rule_dict['max_kbps']) / 1024)) expected_burst = rule_dict['max_burst_kbps'] * 128 expected_peak = int(expected_bw * self.peak_bw_multiplier) # egress neutron rule -> ingress nsx args update_profile.assert_called_once_with( self.fake_profile_id, ingress_bw_enabled=True, ingress_burst_size=expected_burst, ingress_peak_bandwidth=expected_peak, ingress_average_bandwidth=expected_bw, egress_bw_enabled=False, egress_burst_size=None, egress_peak_bandwidth=None, egress_average_bandwidth=None, dscp=0, qos_marking='trusted' ) @mock.patch.object(policy_object.QosPolicy, '_reload_rules') def test_ingress_bw_rule_create_profile(self, *mocks): # test the switch profile update when a ingress QoS BW rule is created _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [self.ingress_rule]) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.' 
'set_profile_shaping' ) as update_profile: with mock.patch('neutron.objects.db.api.update_object', return_value=self.ingress_rule_data): self.qos_plugin.update_policy_bandwidth_limit_rule( self.ctxt, self.ingress_rule.id, _policy.id, self.ingress_rule_data) # validate the data on the profile rule_dict = self.ingress_rule_data['bandwidth_limit_rule'] expected_bw = int(round(float( rule_dict['max_kbps']) / 1024)) expected_burst = rule_dict['max_burst_kbps'] * 128 expected_peak = int(expected_bw * self.peak_bw_multiplier) # ingress neutron rule -> egress nsx args update_profile.assert_called_once_with( self.fake_profile_id, egress_bw_enabled=True, egress_burst_size=expected_burst, egress_peak_bandwidth=expected_peak, egress_average_bandwidth=expected_bw, ingress_bw_enabled=False, ingress_burst_size=None, ingress_peak_bandwidth=None, ingress_average_bandwidth=None, dscp=0, qos_marking='trusted' ) @mock.patch.object(policy_object.QosPolicy, '_reload_rules') def test_bw_rule_create_profile_minimal_val(self, *mocks): # test driver precommit with an invalid limit value bad_limit = qos_utils.MAX_KBPS_MIN_VALUE - 1 rule_data = { 'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(), 'max_kbps': bad_limit, 'max_burst_kbps': 150}} rule = rule_object.QosBandwidthLimitRule( self.ctxt, **rule_data['bandwidth_limit_rule']) _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [rule]) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy),\ mock.patch('neutron.objects.db.api.update_object', return_value=rule_data): self.assertRaises( exceptions.DriverCallError, self.qos_plugin.update_policy_bandwidth_limit_rule, self.ctxt, rule.id, _policy.id, rule_data) @mock.patch.object(policy_object.QosPolicy, '_reload_rules') def test_bw_rule_create_profile_maximal_val(self, *mocks): # test driver precommit with an invalid burst value bad_burst = qos_utils.MAX_BURST_MAX_VALUE + 1 
rule_data = { 'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(), 'max_kbps': 1025, 'max_burst_kbps': bad_burst}} rule = rule_object.QosBandwidthLimitRule( self.ctxt, **rule_data['bandwidth_limit_rule']) _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [rule]) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy),\ mock.patch('neutron.objects.db.api.update_object', return_value=rule_data): self.assertRaises( exceptions.DriverCallError, self.qos_plugin.update_policy_bandwidth_limit_rule, self.ctxt, rule.id, _policy.id, rule_data) @mock.patch.object(policy_object.QosPolicy, '_reload_rules') def test_dscp_rule_create_profile(self, *mocks): # test the switch profile update when a QoS DSCP rule is created _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [self.dscp_rule]) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.' 'set_profile_shaping' ) as update_profile: with mock.patch('neutron.objects.db.api.' 
'update_object', return_value=self.dscp_rule_data): self.qos_plugin.update_policy_dscp_marking_rule( self.ctxt, self.dscp_rule.id, _policy.id, self.dscp_rule_data) # validate the data on the profile rule_dict = self.dscp_rule_data['dscp_marking_rule'] dscp_mark = rule_dict['dscp_mark'] update_profile.assert_called_once_with( self.fake_profile_id, ingress_bw_enabled=False, ingress_burst_size=None, ingress_peak_bandwidth=None, ingress_average_bandwidth=None, egress_bw_enabled=False, egress_burst_size=None, egress_peak_bandwidth=None, egress_average_bandwidth=None, dscp=dscp_mark, qos_marking='untrusted' ) def test_rule_delete_profile(self): # test the switch profile update when a QoS rule is deleted _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) # The mock will return the policy without the rule, # as if it was deleted with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.' 'set_profile_shaping' ) as update_profile: setattr(_policy, "rules", [self.rule]) self.qos_plugin.delete_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, self.policy.id) # validate the data on the profile update_profile.assert_called_once_with( self.fake_profile_id, ingress_bw_enabled=False, ingress_burst_size=None, ingress_peak_bandwidth=None, ingress_average_bandwidth=None, egress_bw_enabled=False, egress_burst_size=None, egress_peak_bandwidth=None, egress_average_bandwidth=None, dscp=0, qos_marking='trusted' ) @mock.patch('neutron.objects.db.api.get_object', return_value=None) def test_policy_delete_profile(self, *mocks): # test the switch profile deletion when a QoS policy is deleted with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.' 
'delete', return_value=self.fake_profile ) as delete_profile: self.qos_plugin.delete_policy(self.ctxt, self.policy.id) delete_profile.assert_called_once_with(self.fake_profile_id) vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/qos/test_nsxv_notification.py0000666000175100017510000003426313244523345030061 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from neutron.objects.qos import policy as policy_object from neutron.objects.qos import rule as rule_object from neutron.services.qos import qos_plugin from neutron.tests.unit.services.qos import base from neutron_lib import context from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from oslo_config import cfg from oslo_utils import uuidutils from vmware_nsx.dvs import dvs from vmware_nsx.dvs import dvs_utils from vmware_nsx.services.qos.common import utils as qos_com_utils from vmware_nsx.services.qos.nsx_v import driver as qos_driver from vmware_nsx.services.qos.nsx_v import utils as qos_utils from vmware_nsx.tests.unit.nsx_v import test_plugin CORE_PLUGIN = "vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2" class TestQosNsxVNotification(test_plugin.NsxVPluginV2TestCase, base.BaseQosTestCase): @mock.patch.object(dvs_utils, 'dvs_create_session') def setUp(self, *mocks): # init the nsx-v plugin for testing with DVS self._init_dvs_config() # Reset the drive to re-create it 
qos_driver.DRIVER = None super(TestQosNsxVNotification, self).setUp(plugin=CORE_PLUGIN, ext_mgr=None) self.setup_coreplugin(CORE_PLUGIN) plugin_instance = directory.get_plugin() self._core_plugin = plugin_instance self._core_plugin.init_is_complete = True self.qos_plugin = qos_plugin.QoSPlugin() mock.patch.object(qos_utils.NsxVQosRule, '_get_qos_plugin', return_value=self.qos_plugin).start() # Pre defined QoS data for the tests self.ctxt = context.Context('fake_user', 'fake_tenant') self.policy_data = { 'policy': {'id': uuidutils.generate_uuid(), 'project_id': uuidutils.generate_uuid(), 'name': 'test-policy', 'description': 'Test policy description', 'shared': True}} self.rule_data = { 'bandwidth_limit_rule': { 'id': uuidutils.generate_uuid(), 'max_kbps': 100, 'max_burst_kbps': 150, 'type': qos_consts.RULE_TYPE_BANDWIDTH_LIMIT}} self.ingress_rule_data = { 'bandwidth_limit_rule': { 'id': uuidutils.generate_uuid(), 'max_kbps': 200, 'max_burst_kbps': 250, 'direction': 'ingress', 'type': qos_consts.RULE_TYPE_BANDWIDTH_LIMIT}} self.dscp_rule_data = { 'dscp_marking_rule': { 'id': uuidutils.generate_uuid(), 'dscp_mark': 22, 'type': qos_consts.RULE_TYPE_DSCP_MARKING}} self.policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) # egress bw rule self.rule = rule_object.QosBandwidthLimitRule( self.ctxt, **self.rule_data['bandwidth_limit_rule']) # ingress bw rule self.ingress_rule = rule_object.QosBandwidthLimitRule( self.ctxt, **self.ingress_rule_data['bandwidth_limit_rule']) # dscp marking rule self.dscp_rule = rule_object.QosDscpMarkingRule( self.ctxt, **self.dscp_rule_data['dscp_marking_rule']) self._net_data = {'network': { 'name': 'test-qos', 'tenant_id': 'fake_tenant', 'qos_policy_id': self.policy.id, 'port_security_enabled': False, 'admin_state_up': False, 'shared': False }} self._rules = [self.rule_data['bandwidth_limit_rule']] self._dscp_rules = [self.dscp_rule_data['dscp_marking_rule']] mock.patch( 
'neutron.objects.qos.policy.QosPolicy.obj_load_attr').start() def _init_dvs_config(self): # Ensure that DVS is enabled # and enable the DVS features for nsxv qos support cfg.CONF.set_override('host_ip', 'fake_ip', group='dvs') cfg.CONF.set_override('host_username', 'fake_user', group='dvs') cfg.CONF.set_override('host_password', 'fake_password', group='dvs') cfg.CONF.set_override('dvs_name', 'fake_dvs', group='dvs') cfg.CONF.set_default('use_dvs_features', True, 'nsxv') def _create_net(self, net_data=None): if net_data is None: net_data = self._net_data with mock.patch('vmware_nsx.services.qos.common.utils.' 'get_network_policy_id', return_value=self.policy.id): return self._core_plugin.create_network(self.ctxt, net_data) @mock.patch.object(qos_com_utils, 'update_network_policy_binding') @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') def test_create_network_with_policy_rule(self, dvs_update_mock, update_bindings_mock): """Test the DVS update when a QoS rule is attached to a network""" # Create a policy with a rule _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) setattr(_policy, "rules", [self.rule, self.ingress_rule, self.dscp_rule]) with mock.patch('neutron.services.qos.qos_plugin.QoSPlugin.' 
'get_policy', return_value=_policy) as get_rules_mock: # create the network to use this policy net = self._create_net() # make sure the network-policy binding was updated update_bindings_mock.assert_called_once_with( self.ctxt, net['id'], self.policy.id) # make sure the qos rule was found get_rules_mock.assert_called_with(self.ctxt, self.policy.id) # make sure the dvs was updated self.assertTrue(dvs_update_mock.called) @mock.patch.object(qos_com_utils, 'update_network_policy_binding') @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') def test_create_network_with_default_policy(self, dvs_update_mock, update_bindings_mock): """Test the DVS update when default policy attached to a network""" # Create a default policy with a rule policy_data = copy.deepcopy(self.policy_data['policy']) policy_data['is_default'] = True _policy = policy_object.QosPolicy(self.ctxt, **policy_data) setattr(_policy, "rules", [self.rule, self.dscp_rule]) default_policy = policy_object.QosPolicyDefault( qos_policy_id=policy_data['id']) with mock.patch('neutron.services.qos.qos_plugin.QoSPlugin.' 'get_policy', return_value=_policy) as get_rules_mock,\ mock.patch('neutron.objects.qos.policy.QosPolicyDefault.' 
'get_object', return_value=default_policy): # create the network (with no specific qos policy) net_data = copy.deepcopy(self._net_data) del net_data['network']['qos_policy_id'] net = self._create_net(net_data=net_data) # make sure the network-policy binding was updated update_bindings_mock.assert_called_once_with( self.ctxt, net['id'], self.policy.id) # make sure the qos rule was found get_rules_mock.assert_called_with(self.ctxt, self.policy.id) # make sure the dvs was updated self.assertTrue(dvs_update_mock.called) @mock.patch.object(qos_com_utils, 'update_network_policy_binding') @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') def _test_rule_action_notification(self, action, dvs_update_mock, update_bindings_mock): # Create a policy with a rule _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) # set the rule in the policy data setattr(_policy, "rules", [self.rule]) with mock.patch('neutron.services.qos.qos_plugin.QoSPlugin.' 'get_policy', return_value=_policy) as get_rules_mock,\ mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): # create the network to use this policy net = self._create_net() dvs_update_mock.called = False get_rules_mock.called = False with mock.patch('neutron.objects.db.api.create_object', return_value=self.rule_data),\ mock.patch('neutron.objects.db.api.update_object', return_value=self.rule_data),\ mock.patch('neutron.objects.db.api.delete_object'),\ mock.patch.object(_policy, 'get_bound_networks', return_value=[net['id']]),\ mock.patch.object(self.ctxt.session, 'expunge'): # create/update/delete the rule if action == 'create': self.qos_plugin.create_policy_bandwidth_limit_rule( self.ctxt, self.policy.id, self.rule_data) elif action == 'update': self.qos_plugin.update_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, self.policy.id, self.rule_data) else: self.qos_plugin.delete_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, self.policy.id) # make sure the qos 
rule was found self.assertTrue(get_rules_mock.called) # make sure the dvs was updated self.assertTrue(dvs_update_mock.called) def test_create_rule_notification(self): """Test the DVS update when a QoS rule, attached to a network, is created """ self._test_rule_action_notification('create') def test_update_rule_notification(self): """Test the DVS update when a QoS rule, attached to a network, is modified """ self._test_rule_action_notification('update') def test_delete_rule_notification(self): """Test the DVS update when a QoS rule, attached to a network, is deleted """ self._test_rule_action_notification('delete') @mock.patch.object(qos_com_utils, 'update_network_policy_binding') @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') def _test_dscp_rule_action_notification(self, action, dvs_update_mock, update_bindings_mock): # Create a policy with a rule _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) # set the rule in the policy data setattr(_policy, "rules", [self.dscp_rule]) plugin = self.qos_plugin with mock.patch('neutron.services.qos.qos_plugin.QoSPlugin.' 'get_policy', return_value=_policy) as rules_mock,\ mock.patch('neutron.objects.qos.policy.' 
'QosPolicy.get_object', return_value=_policy),\ mock.patch.object(self.ctxt.session, 'expunge'): # create the network to use this policy net = self._create_net() dvs_update_mock.called = False rules_mock.called = False with mock.patch('neutron.objects.db.api.create_object', return_value=self.dscp_rule_data),\ mock.patch('neutron.objects.db.api.update_object', return_value=self.dscp_rule_data),\ mock.patch('neutron.objects.db.api.delete_object'),\ mock.patch.object(_policy, 'get_bound_networks', return_value=[net['id']]),\ mock.patch.object(self.ctxt.session, 'expunge'): # create/update/delete the rule if action == 'create': plugin.create_policy_dscp_marking_rule( self.ctxt, self.policy.id, self.dscp_rule_data) elif action == 'update': plugin.update_policy_dscp_marking_rule( self.ctxt, self.dscp_rule.id, self.policy.id, self.dscp_rule_data) else: plugin.delete_policy_dscp_marking_rule( self.ctxt, self.dscp_rule.id, self.policy.id) # make sure the qos rule was found self.assertTrue(rules_mock.called) # make sure the dvs was updated self.assertTrue(dvs_update_mock.called) def test_create_dscp_rule_notification(self): """Test the DVS update when a QoS DSCP rule, attached to a network, is created """ self._test_dscp_rule_action_notification('create') def test_update_dscp_rule_notification(self): """Test the DVS update when a QoS DSCP rule, attached to a network, is modified """ self._test_dscp_rule_action_notification('update') def test_delete_dscp_rule_notification(self): """Test the DVS update when a QoS DSCP rule, attached to a network, is deleted """ self._test_dscp_rule_action_notification('delete') vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/flowclassifier/0000775000175100017510000000000013244524600025077 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/flowclassifier/test_nsxv_driver.py0000666000175100017510000002624313244523345031077 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_utils import importutils from vmware_nsx.services.flowclassifier.nsx_v import driver as nsx_v_driver from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns from neutron.api import extensions as api_ext from neutron.common import config from neutron_lib.api.definitions import portbindings from neutron_lib import context from neutron_lib.plugins import directory from networking_sfc.db import flowclassifier_db as fdb from networking_sfc.extensions import flowclassifier from networking_sfc.services.flowclassifier.common import context as fc_ctx from networking_sfc.services.flowclassifier.common import exceptions as fc_exc from networking_sfc.tests import base from networking_sfc.tests.unit.db import test_flowclassifier_db class TestNsxvFlowClassifierDriver( test_flowclassifier_db.FlowClassifierDbPluginTestCaseBase, base.NeutronDbPluginV2TestCase): resource_prefix_map = dict([ (k, flowclassifier.FLOW_CLASSIFIER_PREFIX) for k in flowclassifier.RESOURCE_ATTRIBUTE_MAP.keys() ]) def setUp(self): # init the flow classifier plugin flowclassifier_plugin = ( test_flowclassifier_db.DB_FLOWCLASSIFIER_PLUGIN_CLASS) service_plugins = { flowclassifier.FLOW_CLASSIFIER_EXT: flowclassifier_plugin } fdb.FlowClassifierDbPlugin.supported_extension_aliases = [ flowclassifier.FLOW_CLASSIFIER_EXT] fdb.FlowClassifierDbPlugin.path_prefix = ( 
flowclassifier.FLOW_CLASSIFIER_PREFIX ) super(TestNsxvFlowClassifierDriver, self).setUp( ext_mgr=None, plugin=None, service_plugins=service_plugins ) self.flowclassifier_plugin = importutils.import_object( flowclassifier_plugin) ext_mgr = api_ext.PluginAwareExtensionManager( test_flowclassifier_db.extensions_path, { flowclassifier.FLOW_CLASSIFIER_EXT: self.flowclassifier_plugin } ) app = config.load_paste_app('extensions_test_app') self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr) self.ctx = context.get_admin_context() # use the fake vcns mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) mock_vcns_instance = mock_vcns.start() self.fc2 = fake_vcns.FakeVcns() mock_vcns_instance.return_value = self.fc2 # use the nsxv flow classifier driver self._profile_id = 'serviceprofile-1' cfg.CONF.set_override('service_insertion_profile_id', self._profile_id, 'nsxv') cfg.CONF.set_override('service_insertion_redirect_all', True, 'nsxv') self.driver = nsx_v_driver.NsxvFlowClassifierDriver() self.driver.initialize() self._fc_name = 'test1' self._fc_description = 'test 1' self._fc_source = '10.10.0.0/24' self._fc_dest = '20.10.0.0/24' self._fc_prot = 'TCP' self._fc_source_ports = range(100, 115) self._fc_dest_ports = range(80, 81) self._fc = {'name': self._fc_name, 'description': self._fc_description, 'logical_source_port': None, 'logical_destination_port': None, 'source_ip_prefix': self._fc_source, 'destination_ip_prefix': self._fc_dest, 'protocol': self._fc_prot, 'source_port_range_min': self._fc_source_ports[0], 'source_port_range_max': self._fc_source_ports[-1], 'destination_port_range_min': self._fc_dest_ports[0], 'destination_port_range_max': self._fc_dest_ports[-1]} def tearDown(self): super(TestNsxvFlowClassifierDriver, self).tearDown() def test_driver_init(self): self.assertEqual(self._profile_id, self.driver._profile_id) self.assertEqual(self.driver._security_group_id, '0') orig_get_plugin = directory.get_plugin def mocked_get_plugin(plugin=None): # 
mock only the core plugin if plugin: return orig_get_plugin(plugin) return mock_nsxv_plugin mock_nsxv_plugin = mock.Mock() fc_plugin = directory.get_plugin(flowclassifier.FLOW_CLASSIFIER_EXT) with mock.patch.object(directory, 'get_plugin', new=mocked_get_plugin): with mock.patch.object( mock_nsxv_plugin, 'add_vms_to_service_insertion') as fake_add: with mock.patch.object( fc_plugin, 'create_flow_classifier') as fake_create: self.driver.init_complete(None, None, {}) # check that the plugin was called to add vms to the # security group self.assertTrue(fake_add.called) # check that redirect_all flow classifier entry # was created self.assertTrue(fake_create.called) def test_create_flow_classifier_precommit(self): with self.flow_classifier(flow_classifier=self._fc) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) # just make sure it does not raise an exception self.driver.create_flow_classifier_precommit(fc_context) def test_create_flow_classifier_precommit_logical_source_port(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_source_port': src_port['port']['id'] }) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) self.assertRaises( fc_exc.FlowClassifierBadRequest, self.driver.create_flow_classifier_precommit, fc_context) def test_create_flow_classifier_precommit_logical_dest_port(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as dst_port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_destination_port': dst_port['port']['id'] }) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) 
self.assertRaises( fc_exc.FlowClassifierBadRequest, self.driver.create_flow_classifier_precommit, fc_context) def _validate_rule_structure(self, rule): self.assertEqual(self._fc_description, rule.find('notes').text) self.assertEqual('ipv4', rule.find('packetType').text) self.assertEqual( self._fc_source, rule.find('sources').find('source').find('value').text) self.assertEqual( self._fc_dest, rule.find('destinations').find('destination').find('value').text) ports = "%s-%s" % (self._fc_source_ports[0], self._fc_source_ports[-1]) if self._fc_source_ports[0] == self._fc_source_ports[-1]: ports = str(self._fc_source_ports[0]) self.assertEqual( ports, rule.find('services').find('service').find('sourcePort').text) ports = "%s-%s" % (self._fc_dest_ports[0], self._fc_dest_ports[-1]) if self._fc_dest_ports[0] == self._fc_dest_ports[-1]: ports = str(self._fc_dest_ports[0]) self.assertEqual( ports, rule.find('services').find('service').find('destinationPort').text) self.assertEqual( self._fc_prot, rule.find('services').find('service').find('protocolName').text) self.assertTrue(rule.find('name').text.startswith(self._fc_name)) def test_create_flow_classifier(self): with self.flow_classifier(flow_classifier=self._fc) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) with mock.patch.object( self.driver, 'update_redirect_section_in_backed') as mock_update_section: self.driver.create_flow_classifier(fc_context) self.assertTrue(mock_update_section.called) section = mock_update_section.call_args[0][0] self._validate_rule_structure(section.find('rule')) def test_update_flow_classifier(self): with self.flow_classifier(flow_classifier=self._fc) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) self.driver.create_flow_classifier(fc_context) with mock.patch.object( self.driver, 'update_redirect_section_in_backed') as mock_update_section: 
self.driver.update_flow_classifier(fc_context) self.assertTrue(mock_update_section.called) section = mock_update_section.call_args[0][0] self._validate_rule_structure(section.find('rule')) def test_delete_flow_classifier(self): with self.flow_classifier(flow_classifier=self._fc) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) self.driver.create_flow_classifier(fc_context) with mock.patch.object( self.driver, 'update_redirect_section_in_backed') as mock_update_section: self.driver.delete_flow_classifier(fc_context) self.assertTrue(mock_update_section.called) section = mock_update_section.call_args[0][0] # make sure the rule is not there self.assertIsNone(section.find('rule')) vmware-nsx-12.0.1/vmware_nsx/tests/unit/services/flowclassifier/__init__.py0000666000175100017510000000000013244523345027205 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/shell/0000775000175100017510000000000013244524600021347 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/shell/test_admin_utils.py0000666000175100017510000002733013244523345025304 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import mock from neutron.common import config as neutron_config from neutron.db import servicetype_db # noqa from neutron.quota import resource_registry from neutron.tests import base from neutron_lib.callbacks import registry from neutron_lib.plugins import constants from oslo_config import cfg from oslo_log import _options from oslo_log import log as logging from oslo_utils import uuidutils import six from vmware_nsxlib.v3 import resources as nsx_v3_resources from vmware_nsx._i18n import _ from vmware_nsx.common import config # noqa from vmware_nsx.db import nsxv_db from vmware_nsx.dvs import dvs_utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as nsxv_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils as nsxv3_utils from vmware_nsx.shell import resources from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_v import test_plugin as test_v_plugin from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_v3_plugin LOG = logging.getLogger(__name__) NSX_INI_PATH = vmware.get_fake_conf('nsx.ini.test') BASE_CONF_PATH = vmware.get_fake_conf('neutron.conf.test') @six.add_metaclass(abc.ABCMeta) class AbstractTestAdminUtils(base.BaseTestCase): def setUp(self): cfg.CONF.unregister_opts(_options.common_cli_opts) cfg.CONF.register_cli_opts(resources.cli_opts) super(AbstractTestAdminUtils, self).setUp() # remove resource registration conflicts resource_registry.unregister_all_resources() # Init the neutron config neutron_config.init(args=['--config-file', BASE_CONF_PATH, '--config-file', NSX_INI_PATH]) self._init_mock_plugin() self._init_resource_plugin() self.addCleanup(resource_registry.unregister_all_resources) def _init_mock_plugin(self): mock_query = mock.patch( "vmware_nsx.shell.admin.plugins.common.utils.query_yes_no") mock_query.start() @abc.abstractmethod def _get_plugin_name(self): pass def _init_resource_plugin(self): plugin_name = self._get_plugin_name() resources.init_resource_plugin( 
plugin_name, resources.get_plugin_dir(plugin_name)) def _test_resource(self, res_name, op, **kwargs): errors = self._test_resource_with_errors(res_name, op, **kwargs) if len(errors) > 0: msg = (_("admin util %(res)s/%(op)s failed with message: " "%(err)s") % {'res': res_name, 'op': op, 'err': errors[0]}) self.fail(msg=msg) def _test_resource_with_errors(self, res_name, op, **kwargs): # Must call the internal notify_loop in order to get the errors return registry._get_callback_manager()._notify_loop( res_name, op, 'nsxadmin', **kwargs) def _test_resources(self, res_dict): for res in res_dict.keys(): res_name = res_dict[res].name for op in res_dict[res].supported_ops: self._test_resource(res_name, op) def _test_resources_with_args(self, res_dict, func_args): for res in res_dict.keys(): res_name = res_dict[res].name for op in res_dict[res].supported_ops: args = {'property': func_args} self._test_resource(res_name, op, **args) class TestNsxvAdminUtils(AbstractTestAdminUtils, test_v_plugin.NsxVPluginV2TestCase): def _get_plugin_name(self): return 'nsxv' def _init_mock_plugin(self, *mocks): super(TestNsxvAdminUtils, self)._init_mock_plugin() # support the dvs manager: mock.patch.object(dvs_utils, 'dvs_create_session').start() # override metadata get-object dummy_lb = { 'enabled': True, 'enableServiceInsertion': True, 'accelerationEnabled': True, 'virtualServer': [], 'applicationProfile': [], 'pool': [], 'applicationRule': [] } mock.patch('vmware_nsx.plugins.nsx_v.vshield.nsxv_edge_cfg_obj.' 'NsxvEdgeCfgObj.get_object', return_value=dummy_lb).start() # Tests shouldn't wait for dummy spawn jobs to finish mock.patch('vmware_nsx.shell.admin.plugins.nsxv.resources.utils.' 
'NsxVPluginWrapper.count_spawn_jobs', return_value=0).start() self._plugin = nsxv_utils.NsxVPluginWrapper() def get_plugin_mock(alias=constants.CORE): if alias in (constants.CORE, constants.L3): return self._plugin mock.patch("neutron_lib.plugins.directory.get_plugin", side_effect=get_plugin_mock).start() # Create a router to make sure we have deployed an edge self.router = self.create_router() def tearDown(self): if self.router and self.router.get('id'): edgeapi = nsxv_utils.NeutronDbClient() self._plugin.delete_router(edgeapi.context, self.router['id']) super(TestNsxvAdminUtils, self).tearDown() def test_nsxv_resources(self): self._test_resources(resources.nsxv_resources) def _test_edge_nsx_update(self, edge_id, params): args = {'property': ["edge-id=%s" % edge_id]} args['property'].extend(params) self._test_resource('edges', 'nsx-update', **args) def create_router(self): # Create an exclusive router (with an edge) tenant_id = uuidutils.generate_uuid() data = {'router': {'tenant_id': tenant_id}} data['router']['name'] = 'dummy' data['router']['admin_state_up'] = True data['router']['router_type'] = 'exclusive' edgeapi = nsxv_utils.NeutronDbClient() return self._plugin.create_router(edgeapi.context, data) def get_edge_id(self): edgeapi = nsxv_utils.NeutronDbClient() bindings = nsxv_db.get_nsxv_router_bindings(edgeapi.context.session) for binding in bindings: if binding.edge_id: return binding.edge_id # use a dummy edge return "edge-1" def test_edge_nsx_updates(self): """Test eges/nsx-update utility with different inputs.""" edge_id = self.get_edge_id() self._test_edge_nsx_update(edge_id, ["appliances=true"]) self._test_edge_nsx_update(edge_id, ["size=compact"]) self._test_edge_nsx_update(edge_id, ["hostgroup=update"]) self._test_edge_nsx_update(edge_id, ["hostgroup=all"]) self._test_edge_nsx_update(edge_id, ["hostgroup=clean"]) self._test_edge_nsx_update(edge_id, ["highavailability=True"]) self._test_edge_nsx_update(edge_id, ["resource=cpu", "limit=100"]) 
self._test_edge_nsx_update(edge_id, ["syslog-server=1.1.1.1", "syslog-proto=tcp", "log-level=debug"]) def test_bad_args(self): args = {'property': ["xxx"]} errors = self._test_resource_with_errors( 'networks', 'nsx-update', **args) self.assertEqual(1, len(errors)) def test_resources_with_common_args(self): """Run all nsxv admin utilities with some common arguments Using arguments like edge-id which many apis need This improves the test coverage """ edge_id = self.get_edge_id() args = ["edge-id=%s" % edge_id, "router-id=e5b9b249-0034-4729-8ab6-fe4dacaa3a12", "policy-id=1", "network_id=net-1", "net-id=net-1", "security-group-id=sg-1", "dvs-id=dvs-1", "moref=virtualwire-1", "teamingpolicy=LACP_ACTIVE" ] self._test_resources_with_args( resources.nsxv_resources, args) def test_router_recreate(self): # Testing router-recreate separately because it may change the edge-id edge_id = self.get_edge_id() args = {'property': ["edge-id=%s" % edge_id]} self._test_resource('routers', 'nsx-recreate', **args) class TestNsxv3AdminUtils(AbstractTestAdminUtils, test_v3_plugin.NsxV3PluginTestCaseMixin): def _patch_object(self, *args, **kwargs): patcher = mock.patch.object(*args, **kwargs) patcher.start() self._patchers.append(patcher) def _init_mock_plugin(self): test_v3_plugin._mock_nsx_backend_calls() # mock resources for cls in (nsx_v3_resources.LogicalPort, nsx_v3_resources.LogicalDhcpServer, nsx_v3_resources.LogicalRouter, nsx_v3_resources.SwitchingProfile): self._patch_object(cls, '__init__', return_value=None) self._patch_object(cls, 'list', return_value={'results': []}) self._patch_object(cls, 'get', return_value={'id': uuidutils.generate_uuid()}) self._patch_object(cls, 'update') self._patch_object(nsx_v3_resources.SwitchingProfile, 'find_by_display_name', return_value=[{'id': uuidutils.generate_uuid()}]) super(TestNsxv3AdminUtils, self)._init_mock_plugin() self._plugin = nsxv3_utils.NsxV3PluginWrapper() mock_nm_get_plugin = mock.patch( 
"neutron_lib.plugins.directory.get_plugin") self.mock_nm_get_plugin = mock_nm_get_plugin.start() self.mock_nm_get_plugin.return_value = self._plugin def _get_plugin_name(self): return 'nsxv3' def test_nsxv3_resources(self): self._test_resources(resources.nsxv3_resources) def test_resources_with_common_args(self): """Run all nsxv3 admin utilities with some common arguments Using arguments like dhcp_profile_uuid which many apis need This improves the test coverage """ args = ["dhcp_profile_uuid=e5b9b249-0034-4729-8ab6-fe4dacaa3a12", "metadata_proxy_uuid=e5b9b249-0034-4729-8ab6-fe4dacaa3a12", "nsx-id=e5b9b249-0034-4729-8ab6-fe4dacaa3a12", "availability-zone=default", "server-ip=1.1.1.1" ] # Create some neutron objects for the utilities to run on self._create_router() with self._create_l3_ext_network() as network: with self.subnet(network=network) as subnet: with self.port(subnet=subnet): # Run all utilities with backend objects self._test_resources_with_args( resources.nsxv3_resources, args) def _create_router(self): tenant_id = uuidutils.generate_uuid() data = {'router': {'tenant_id': tenant_id}} data['router']['name'] = 'dummy' data['router']['admin_state_up'] = True edgeapi = nsxv_utils.NeutronDbClient() return self._plugin.create_router(edgeapi.context, data) class TestNsxtvdAdminUtils(AbstractTestAdminUtils): def _get_plugin_name(self): return 'nsxtvd' def test_nsxtv_resources(self): self._test_resources(resources.nsxtvd_resources) vmware-nsx-12.0.1/vmware_nsx/tests/unit/shell/__init__.py0000666000175100017510000000000013244523345023455 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/0000775000175100017510000000000013244524600021013 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_post_security_profile.json0000666000175100017510000000054513244523345027343 0ustar zuulzuul00000000000000{ "display_name": "%(display_name)s", "_href": "/ws.v1/security-profile/%(uuid)s", "tags": [{"scope": "os_tid", "tag": 
"%(tenant_id)s"}, {"scope": "nova_spid", "tag": "%(nova_spid)s"}], "logical_port_egress_rules": [], "_schema": "/ws.v1/schema/SecurityProfileConfig", "logical_port_ingress_rules": [], "uuid": "%(uuid)s" } vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_put_lswitch_lport_att.json0000666000175100017510000000041613244523345027341 0ustar zuulzuul00000000000000{ "LogicalPortAttachment": { %(peer_port_href_field)s %(peer_port_uuid_field)s %(vif_uuid_field)s "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(lp_uuid)s/attachment", "type": "%(type)s", "schema": "/ws.v1/schema/%(type)s" } }vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_post_lrouter.json0000666000175100017510000000114313244523345025443 0ustar zuulzuul00000000000000{ "display_name": "%(display_name)s", %(distributed_json)s "uuid": "%(uuid)s", "tags": [ { "scope": "os_tid", "tag": "%(tenant_id)s" } ], "routing_config": { "type": "SingleDefaultRouteImplicitRoutingConfig", "_schema": "/ws.v1/schema/SingleDefaultRouteImplicitRoutingConfig", "default_route_next_hop": { "type": "RouterNextHop", "_schema": "/ws.v1/schema/RouterNextHop", "gateway_ip_address": "%(default_next_hop)s" } }, "_schema": "/ws.v1/schema/LogicalRouterConfig", "type": "LogicalRouterConfig", "_href": "/ws.v1/lrouter/%(uuid)s" }vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_post_lswitch.json0000666000175100017510000000057113244523345025430 0ustar zuulzuul00000000000000{ "display_name": "%(display_name)s", "uuid": "%(uuid)s", "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}], "type": "LogicalSwitchConfig", "_schema": "/ws.v1/schema/LogicalSwitchConfig", "port_isolation_enabled": false, "transport_zones": [ {"zone_uuid": "%(zone_uuid)s", "transport_type": "stt"}], "_href": "/ws.v1/lswitch/%(uuid)s" } vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_post_lrouter_lport.json0000666000175100017510000000050013244523345026657 0ustar zuulzuul00000000000000{ "display_name": "%(display_name)s", "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(uuid)s", 
"_schema": "/ws.v1/schema/LogicalRouterPortConfig", "mac_address": "00:00:00:00:00:00", "admin_status_enabled": true, "ip_addresses": %(ip_addresses_json)s, "type": "LogicalRouterPortConfig", "uuid": "%(uuid)s" }vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/nsx.ini.agentless.test0000666000175100017510000000055213244523345025277 0ustar zuulzuul00000000000000[DEFAULT] default_tz_uuid = fake_tz_uuid nova_zone_id = whatever nsx_controllers = fake_1, fake_2 nsx_user = foo nsx_password = bar default_l3_gw_service_uuid = whatever default_l2_gw_service_uuid = whatever default_service_cluster_uuid = whatever nsx_default_interface_name = whatever http_timeout = 13 redirects = 12 retries = 11 [NSX] agent_mode = agentless vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_get_security_profile.json0000666000175100017510000000064613244523345027137 0ustar zuulzuul00000000000000{ "display_name": "%(display_name)s", "_href": "/ws.v1/security-profile/%(uuid)s", "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}, {"scope": "nova_spid", "tag": "%(nova_spid)s"}], "logical_port_egress_rules": %(logical_port_egress_rules_json)s, "_schema": "/ws.v1/schema/SecurityProfileConfig", "logical_port_ingress_rules": %(logical_port_ingress_rules_json)s, "uuid": "%(uuid)s" } vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_get_lswitch_lport_status.json0000666000175100017510000000127313244523345030045 0ustar zuulzuul00000000000000{"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s", "lswitch": {"display_name": "%(ls_name)s", "uuid": "%(ls_uuid)s", "tags": [ {"scope": "os_tid", "tag": "%(ls_tenant_id)s"} ], "type": "LogicalSwitchConfig", "_schema": "/ws.v1/schema/LogicalSwitchConfig", "port_isolation_enabled": false, "transport_zones": [ {"zone_uuid": "%(ls_zone_uuid)s", "transport_type": "stt"} ], "_href": "/ws.v1/lswitch/%(ls_uuid)s"}, "link_status_up": false, "_schema": "/ws.v1/schema/LogicalSwitchPortStatus", "admin_status_enabled": true, "fabric_status_up": true, "link_status_up": true, "type": 
"LogicalSwitchPortStatus" } vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/nsx.ini.combined.test0000666000175100017510000000055113244523345025071 0ustar zuulzuul00000000000000[DEFAULT] default_tz_uuid = fake_tz_uuid nova_zone_id = whatever nsx_controllers = fake_1, fake_2 nsx_user = foo nsx_password = bar default_l3_gw_service_uuid = whatever default_l2_gw_service_uuid = whatever default_service_cluster_uuid = whatever nsx_default_interface_name = whatever http_timeout = 13 redirects = 12 retries = 11 [NSX] agent_mode = combined vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/nsx.ini.basic.test0000666000175100017510000000014513244523345024371 0ustar zuulzuul00000000000000[DEFAULT] default_tz_uuid = fake_tz_uuid nsx_controllers=fake_1,fake_2 nsx_user=foo nsx_password=bar vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_put_lrouter_lport_att.json0000666000175100017510000000046513244523345027364 0ustar zuulzuul00000000000000{ "LogicalPortAttachment": { %(peer_port_href_field)s %(peer_port_uuid_field)s %(l3_gateway_service_uuid_field)s %(vlan_id_field)s "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(lp_uuid)s/attachment", "type": "%(type)s", "schema": "/ws.v1/schema/%(type)s" } }vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_get_lrouter_nat.json0000666000175100017510000000017713244523345026105 0ustar zuulzuul00000000000000{ "_href": "/ws.v1/lrouter/%(lr_uuid)s/nat/%(uuid)s", "type": "%(type)s", "match": %(match_json)s, "uuid": "%(uuid)s" }vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_get_lswitch.json0000666000175100017510000000070613244523345025222 0ustar zuulzuul00000000000000{"display_name": "%(display_name)s", "_href": "/ws.v1/lswitch/%(uuid)s", "_schema": "/ws.v1/schema/LogicalSwitchConfig", "_relations": {"LogicalSwitchStatus": {"fabric_status": %(status)s, "type": "LogicalSwitchStatus", "lport_count": %(lport_count)d, "_href": "/ws.v1/lswitch/%(uuid)s/status", "_schema": "/ws.v1/schema/LogicalSwitchStatus"}}, "type": "LogicalSwitchConfig", "tags": %(tags_json)s, 
"uuid": "%(uuid)s"} vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/nsx.ini.full.test0000666000175100017510000000044413244523345024254 0ustar zuulzuul00000000000000[DEFAULT] default_tz_uuid = fake_tz_uuid nova_zone_id = whatever nsx_controllers = fake_1, fake_2 nsx_user = foo nsx_password = bar default_l3_gw_service_uuid = whatever default_l2_gw_service_uuid = whatever nsx_default_interface_name = whatever http_timeout = 13 redirects = 12 retries = 11 vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_post_lswitch_lport.json0000666000175100017510000000100613244523345026642 0ustar zuulzuul00000000000000{ "display_name": "%(uuid)s", "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s", "security_profiles": [], "tags": [{"scope": "q_port_id", "tag": "%(neutron_port_id)s"}, {"scope": "vm_id", "tag": "%(neutron_device_id)s"}, {"scope": "os_tid", "tag": "%(tenant_id)s"}], "portno": 1, "queue_uuid": null, "_schema": "/ws.v1/schema/LogicalSwitchPortConfig", "mirror_targets": [], "allowed_address_pairs": [], "admin_status_enabled": true, "type": "LogicalSwitchPortConfig", "uuid": "%(uuid)s" } vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_get_lswitch_lport.json0000666000175100017510000000170613244523345026443 0ustar zuulzuul00000000000000{"display_name": "%(display_name)s", "_relations": {"LogicalPortStatus": {"type": "LogicalSwitchPortStatus", "admin_status_enabled": true, "fabric_status_up": %(status)s, "link_status_up": %(status)s, "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s/status", "_schema": "/ws.v1/schema/LogicalSwitchPortStatus"}, "LogicalSwitchConfig": {"uuid": "%(ls_uuid)s"}, "LogicalPortAttachment": { "type": "%(att_type)s", %(att_info_json)s "schema": "/ws.v1/schema/%(att_type)s" } }, "tags": [{"scope": "q_port_id", "tag": "%(neutron_port_id)s"}, {"scope": "vm_id", "tag": "%(neutron_device_id)s"}, {"scope": "os_tid", "tag": "%(tenant_id)s"}], "uuid": "%(uuid)s", "admin_status_enabled": "%(admin_status_enabled)s", "type": "LogicalSwitchPortConfig", "_schema": 
"/ws.v1/schema/LogicalSwitchPortConfig", "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s" } vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_get_lswitch_lport_att.json0000666000175100017510000000016513244523345027311 0ustar zuulzuul00000000000000{ "LogicalPortAttachment": { "type": "%(att_type)s", "schema": "/ws.v1/schema/%(att_type)s" } }vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_get_gwservice.json0000666000175100017510000000056613244523345025547 0ustar zuulzuul00000000000000{ "display_name": "%(display_name)s", "_href": "/ws.v1/gateway-service/%(uuid)s", "tags": %(tags_json)s, "_schema": "/ws.v1/schema/L2GatewayServiceConfig", "gateways": [ { "transport_node_uuid": "%(transport_node_uuid)s", "type": "L2Gateway", "device_id": "%(device_id)s" } ], "type": "L2GatewayServiceConfig", "uuid": "%(uuid)s" } vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_post_lrouter_nat.json0000666000175100017510000000017713244523345026313 0ustar zuulzuul00000000000000{ "_href": "/ws.v1/lrouter/%(lr_uuid)s/nat/%(uuid)s", "type": "%(type)s", "match": %(match_json)s, "uuid": "%(uuid)s" }vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_get_lrouter_lport_att.json0000666000175100017510000000034513244523345027330 0ustar zuulzuul00000000000000{ "LogicalPortAttachment": { %(peer_port_href_field)s %(peer_port_uuid_field)s %(l3_gateway_service_uuid_field)s %(vlan_id)s "type": "%(type)s", "schema": "/ws.v1/schema/%(type)s" } }vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/nvp.ini.full.test0000666000175100017510000000044113244523345024244 0ustar zuulzuul00000000000000[DEFAULT] default_tz_uuid = fake_tz_uuid nova_zone_id = whatever nvp_controllers = fake_1, fake_2 nvp_user = foo nvp_password = bar default_l3_gw_service_uuid = whatever default_l2_gw_service_uuid = whatever nsx_default_interface_name = whatever http_timeout = 3 redirects = 2 retries = 2 vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_get_lrouter.json0000666000175100017510000000162413244523345025241 0ustar 
zuulzuul00000000000000{ "display_name": "%(display_name)s", %(distributed_json)s "uuid": "%(uuid)s", "tags": %(tags_json)s, "routing_config": { "type": "SingleDefaultRouteImplicitRoutingConfig", "_schema": "/ws.v1/schema/SingleDefaultRouteImplicitRoutingConfig", "default_route_next_hop": { "type": "RouterNextHop", "_schema": "/ws.v1/schema/RouterNextHop", "gateway_ip_address": "%(default_next_hop)s" } }, "_schema": "/ws.v1/schema/LogicalRouterConfig", "_relations": { "LogicalRouterStatus": { "_href": "/ws.v1/lrouter/%(uuid)s/status", "lport_admin_up_count": %(lport_count)d, "_schema": "/ws.v1/schema/LogicalRouterStatus", "lport_count": %(lport_count)d, "fabric_status": %(status)s, "type": "LogicalRouterStatus", "lport_link_up_count": %(lport_count)d } }, "type": "LogicalRouterConfig", "_href": "/ws.v1/lrouter/%(uuid)s" }vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_post_lqueue.json0000666000175100017510000000053513244523345025253 0ustar zuulzuul00000000000000{ "display_name": "%(display_name)s", "uuid": "%(uuid)s", "type": "LogicalSwitchConfig", "_schema": "/ws.v1/schema/LogicalQueueConfig", "dscp": "%(dscp)s", "max_bandwidth_rate": "%(max_bandwidth_rate)s", "min_bandwidth_rate": "%(min_bandwidth_rate)s", "qos_marking": "%(qos_marking)s", "_href": "/ws.v1/lqueue/%(uuid)s" } vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/neutron.conf.test0000666000175100017510000000070513244523345024343 0ustar zuulzuul00000000000000[DEFAULT] # Show debugging output in logs (sets DEBUG log level output) debug = False # Address to bind the API server bind_host = 0.0.0.0 # Port the bind the API server to bind_port = 9696 # MISSING Path to the extensions # api_extensions_path = # Paste configuration file api_paste_config = api-paste.ini.test # The messaging module to use, defaults to kombu. 
rpc_backend = fake lock_path = $state_path/lock [database] connection = 'sqlite://' vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/nsx.ini.test0000666000175100017510000000047513244523345023317 0ustar zuulzuul00000000000000[DEFAULT] default_tz_uuid = fake_tz_uuid nsx_controllers=fake_1, fake_2 nsx_user=foo nsx_password=bar default_l3_gw_service_uuid = whatever default_l2_gw_service_uuid = whatever [nsxv] manager_uri = https://fake_manager user = fake_user password = fake_password vdn_scope_id = fake_vdn_scope_id dvs_id = fake_dvs_id vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_get_lrouter_lport.json0000666000175100017510000000065113244523345026460 0ustar zuulzuul00000000000000{ "display_name": "%(display_name)s", "admin_status_enabled": "%(admin_status_enabled)s", "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(uuid)s", "tags": [{"scope": "q_port_id", "tag": "%(neutron_port_id)s"}, {"scope": "os_tid", "tag": "%(tenant_id)s"}], "ip_addresses": %(ip_addresses_json)s, "_schema": "/ws.v1/schema/LogicalRouterPortConfig", "type": "LogicalRouterPortConfig", "uuid": "%(uuid)s" } vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_get_lqueue.json0000666000175100017510000000053513244523345025045 0ustar zuulzuul00000000000000{ "display_name": "%(display_name)s", "uuid": "%(uuid)s", "type": "LogicalSwitchConfig", "_schema": "/ws.v1/schema/LogicalQueueConfig", "dscp": "%(dscp)s", "max_bandwidth_rate": "%(max_bandwidth_rate)s", "min_bandwidth_rate": "%(min_bandwidth_rate)s", "qos_marking": "%(qos_marking)s", "_href": "/ws.v1/lqueue/%(uuid)s" } vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/vcns.ini.test0000666000175100017510000000035613244523345023456 0ustar zuulzuul00000000000000[nsxv] manager_uri = https://fake-host user = fake-user passwordd = fake-password datacenter_moid = fake-moid resource_pool_id = fake-resgroup datastore_id = fake-datastore external_network = fake-ext-net task_status_check_interval = 100 
vmware-nsx-12.0.1/vmware_nsx/tests/unit/etc/fake_post_gwservice.json0000666000175100017510000000046313244523345025751 0ustar zuulzuul00000000000000{ "display_name": "%(display_name)s", "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}], "gateways": [ { "transport_node_uuid": "%(transport_node_uuid)s", "device_id": "%(device_id)s", "type": "L2Gateway" } ], "type": "L2GatewayServiceConfig", "uuid": "%(uuid)s" } vmware-nsx-12.0.1/vmware_nsx/tests/unit/dvs/0000775000175100017510000000000013244524600021034 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/dvs/test_utils.py0000666000175100017510000000474013244523345023621 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from oslo_config import cfg from oslo_vmware import api from neutron.tests import base from vmware_nsx.dvs import dvs_utils class DvsUtilsTestCase(base.BaseTestCase): def test_default_configuration(self): self.assertFalse(dvs_utils.dvs_is_enabled()) def _dvs_fake_cfg_set(self): cfg.CONF.set_override('host_ip', 'fake_host_ip', group='dvs') cfg.CONF.set_override('host_username', 'fake_host_user_name', group='dvs') cfg.CONF.set_override('host_password', 'fake_host_pasword', group='dvs') cfg.CONF.set_override('dvs_name', 'fake_dvs', group='dvs') cfg.CONF.set_override('host_port', '443', group='dvs') cfg.CONF.set_override('ca_file', 'cacert', group='dvs') cfg.CONF.set_override('insecure', False, group='dvs') def test_dvs_set(self): self._dvs_fake_cfg_set() self.assertTrue(dvs_utils.dvs_is_enabled()) @mock.patch.object(api.VMwareAPISession, '__init__', return_value=None) def test_dvs_create_session(self, fake_init): dvs_utils.dvs_create_session() fake_init.assert_called_once_with(cfg.CONF.dvs.host_ip, cfg.CONF.dvs.host_username, cfg.CONF.dvs.host_password, cfg.CONF.dvs.api_retry_count, cfg.CONF.dvs.task_poll_interval, port=cfg.CONF.dvs.host_port, cacert=cfg.CONF.dvs.ca_file, insecure=cfg.CONF.dvs.insecure) def test_dvs_name_get(self): cfg.CONF.set_override('dvs_name', 'fake-dvs', group='dvs') self.assertEqual('fake-dvs', dvs_utils.dvs_name_get()) vmware-nsx-12.0.1/vmware_nsx/tests/unit/dvs/__init__.py0000666000175100017510000000000013244523345023142 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/dvs/test_plugin.py0000666000175100017510000003452213244523345023760 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron_lib import context from oslo_config import cfg from oslo_utils import uuidutils from neutron.tests import base import neutron.tests.unit.db.test_db_base_plugin_v2 as test_plugin from neutron_lib.api.definitions import portbindings from neutron_lib import exceptions as exp from neutron_lib.plugins import directory from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import db as nsx_db from vmware_nsx.dvs import dvs from vmware_nsx.dvs import dvs_utils PLUGIN_NAME = 'vmware_nsx.plugin.NsxDvsPlugin' class fake_session(object): def __init__(self, *ret): self._vim = mock.Mock() def invoke_api(self, *args, **kwargs): pass def wait_for_task(self, task): pass def vim(self): return self._vim class DvsTestCase(base.BaseTestCase): @mock.patch.object(dvs_utils, 'dvs_create_session', return_value=fake_session()) @mock.patch.object(dvs.SingleDvsManager, '_get_dvs_moref_by_name', return_value=mock.MagicMock()) def setUp(self, mock_moref, mock_session): super(DvsTestCase, self).setUp() cfg.CONF.set_override('dvs_name', 'fake_dvs', group='dvs') self._dvs = dvs.SingleDvsManager() self.assertEqual(mock_moref.return_value, self._dvs._dvs_moref) mock_moref.assert_called_once_with(mock_session.return_value, 'fake_dvs') @mock.patch.object(dvs_utils, 'dvs_create_session', return_value=fake_session()) def test_dvs_not_found(self, mock_session): self.assertRaises(nsx_exc.DvsNotFound, dvs.SingleDvsManager) @mock.patch.object(dvs.DvsManager, '_get_port_group_spec', return_value='fake-spec') def test_add_port_group(self, fake_get_spec): 
self._dvs.add_port_group('fake-uuid', vlan_tag=7) fake_get_spec.assert_called_once_with('fake-uuid', 7, trunk_mode=False) @mock.patch.object(dvs.DvsManager, '_get_port_group_spec', return_value='fake-spec') def test_add_port_group_with_exception(self, fake_get_spec): with ( mock.patch.object(self._dvs._dvs._session, 'wait_for_task', side_effect=exp.NeutronException()) ): self.assertRaises(exp.NeutronException, self._dvs.add_port_group, 'fake-uuid', 7, trunk_mode=False) fake_get_spec.assert_called_once_with('fake-uuid', 7, trunk_mode=False) @mock.patch.object(dvs.DvsManager, '_net_id_to_moref', return_value='fake-moref') def test_delete_port_group(self, fake_get_moref): self._dvs.delete_port_group('fake-uuid') fake_get_moref.assert_called_once_with(mock.ANY, 'fake-uuid') @mock.patch.object(dvs.DvsManager, '_net_id_to_moref', return_value='fake-moref') def test_delete_port_group_with_exception(self, fake_get_moref): with ( mock.patch.object(self._dvs._dvs._session, 'wait_for_task', side_effect=exp.NeutronException()) ): self.assertRaises(exp.NeutronException, self._dvs.delete_port_group, 'fake-uuid') fake_get_moref.assert_called_once_with(mock.ANY, 'fake-uuid') @mock.patch.object(dvs.DvsManager, '_update_vxlan_port_groups_config') @mock.patch.object(dvs.DvsManager, '_get_port_group_spec', return_value='fake-spec') @mock.patch.object(dvs.DvsManager, '_net_id_to_moref', return_value='fake-moref') def test_update_vxlan_net_group_conf(self, fake_get_moref, fake_get_spec, fake_update_vxlan): net_id = 'vxlan-uuid' vlan = 7 self._dvs.add_port_group(net_id, vlan) self._dvs.net_id_to_moref(net_id) fake_get_moref.assert_called_once_with(mock.ANY, net_id) fake_get_spec.assert_called_once_with(net_id, vlan, trunk_mode=False) @mock.patch.object(dvs.DvsManager, '_update_net_port_groups_config') @mock.patch.object(dvs.DvsManager, '_get_port_group_spec', return_value='fake-spec') @mock.patch.object(dvs.DvsManager, '_net_id_to_moref', return_value='dvportgroup-fake-moref') def 
test_update_flat_net_conf(self, fake_get_moref, fake_get_spec, fake_update_net): net_id = 'flat-uuid' vlan = 7 self._dvs.add_port_group(net_id, vlan) self._dvs.net_id_to_moref(net_id) fake_get_moref.assert_called_once_with(mock.ANY, net_id) fake_get_spec.assert_called_once_with(net_id, vlan, trunk_mode=False) class NeutronSimpleDvsTestCase(test_plugin.NeutronDbPluginV2TestCase): @mock.patch.object(dvs_utils, 'dvs_create_session', return_value=fake_session()) @mock.patch.object(dvs.SingleDvsManager, '_get_dvs_moref_by_name', return_value=mock.MagicMock()) def setUp(self, mock_moref, mock_session, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): # Ensure that DVS is enabled cfg.CONF.set_override('host_ip', 'fake_ip', group='dvs') cfg.CONF.set_override('host_username', 'fake_user', group='dvs') cfg.CONF.set_override('host_password', 'fake_password', group='dvs') cfg.CONF.set_override('dvs_name', 'fake_dvs', group='dvs') super(NeutronSimpleDvsTestCase, self).setUp(plugin=plugin) self._plugin = directory.get_plugin() class NeutronSimpleDvsTest(NeutronSimpleDvsTestCase): def _create_and_delete_dvs_network(self, network_type='flat', vlan_tag=0, trunk_mode=False): params = {'provider:network_type': network_type, 'provider:physical_network': 'fake-moid', 'name': 'fake-name'} if network_type == 'vlan': params['provider:segmentation_id'] = vlan_tag if trunk_mode: params['vlan_transparent'] = True params['arg_list'] = tuple(params.keys()) with mock.patch.object(self._plugin._dvs, 'add_port_group') as mock_add,\ mock.patch.object(self._plugin._dvs, 'delete_port_group') as mock_delete,\ mock.patch.object(dvs.DvsManager, '_get_trunk_vlan_spec') as mock_trunk_vlan: with self.network(**params) as network: ctx = context.get_admin_context() id = network['network']['id'] dvs_id = '%s-%s' % (network['network']['name'], id) binding = nsx_db.get_network_bindings(ctx.session, id) self.assertIsNotNone(binding) if network_type == 'flat': self.assertEqual('flat', 
binding[0].binding_type) self.assertEqual(0, binding[0].vlan_id) self.assertEqual('dvs', binding[0].phy_uuid) elif network_type == 'vlan': self.assertEqual('vlan', binding[0].binding_type) self.assertEqual(vlan_tag, binding[0].vlan_id) self.assertEqual('dvs', binding[0].phy_uuid) elif network_type == 'portgroup': self.assertEqual('portgroup', binding[0].binding_type) self.assertEqual(0, binding[0].vlan_id) self.assertEqual('fake-moid', binding[0].phy_uuid) else: self.fail() if network_type != 'portgroup': mock_add.assert_called_once_with(dvs_id, vlan_tag, trunk_mode=trunk_mode) else: mock_add.call_count = 0 mock_delete.call_count = 0 if trunk_mode: mock_trunk_vlan.called_once_with(start=0, end=4094) else: mock_trunk_vlan.call_count = 0 def test_create_and_delete_dvs_network_tag(self): self._create_and_delete_dvs_network(network_type='vlan', vlan_tag=7) def test_create_and_delete_dvs_network_flat(self): self._create_and_delete_dvs_network() def test_create_and_delete_dvs_network_flat_vlan_transparent(self): self._create_and_delete_dvs_network(trunk_mode=True) @mock.patch.object(dvs.DvsManager, 'get_port_group_info') @mock.patch.object(dvs.DvsManager, '_net_id_to_moref') def test_create_and_delete_dvs_network_portgroup(self, fake_get_moref, fake_pg_info): fake_pg_info.return_value = {'name': 'fake-name'} self._create_and_delete_dvs_network(network_type='portgroup') self.assertTrue(fake_get_moref.call_count) self.assertTrue(fake_pg_info.call_count) @mock.patch.object(dvs.DvsManager, 'get_port_group_info') @mock.patch.object(dvs.DvsManager, '_net_id_to_moref') def test_create_and_delete_dvs_network_portgroup_vlan(self, fake_get_moref, fake_pg_info): fake_pg_info.return_value = {'name': 'fake-name'} self._create_and_delete_dvs_network(network_type='portgroup', vlan_tag=7) self.assertTrue(fake_get_moref.call_count) self.assertTrue(fake_pg_info.call_count) def test_create_and_delete_dvs_port(self): params = {'provider:network_type': 'vlan', 'provider:physical_network': 
'dvs', 'provider:segmentation_id': 7} params['arg_list'] = tuple(params.keys()) with mock.patch.object(self._plugin._dvs, 'add_port_group'),\ mock.patch.object(self._plugin._dvs, 'delete_port_group'): with self.network(**params) as network,\ self.subnet(network) as subnet,\ self.port(subnet) as port: self.assertEqual('dvs', port['port'][portbindings.VIF_TYPE]) port_status = port['port']['status'] self.assertEqual(port_status, 'ACTIVE') def test_create_router_only_dvs_backend(self): data = {'router': {'tenant_id': 'whatever'}} data['router']['name'] = 'router1' data['router']['external_gateway_info'] = {'network_id': 'whatever'} self.assertRaises(exp.BadRequest, self._plugin.create_router, context.get_admin_context(), data) def test_dvs_get_id(self): id = uuidutils.generate_uuid() net = {'name': '', 'id': id} expected = id self.assertEqual(expected, self._plugin._dvs_get_id(net)) net = {'name': 'pele', 'id': id} expected = '%s-%s' % ('pele', id) self.assertEqual(expected, self._plugin._dvs_get_id(net)) name = 'X' * 500 net = {'name': name, 'id': id} expected = '%s-%s' % (name[:43], id) self.assertEqual(expected, self._plugin._dvs_get_id(net)) def test_update_dvs_network(self): """Test update of a DVS network """ params = {'provider:network_type': 'flat', 'admin_state_up': True, 'name': 'test_net', 'tenant_id': 'fake_tenant', 'shared': False, 'port_security_enabled': False} with mock.patch.object(self._plugin._dvs, 'add_port_group'): ctx = context.get_admin_context() # create the initial network network = self._plugin.create_network(ctx, {'network': params}) id = network['id'] # update the different attributes of the DVS network # cannot update the provider type self.assertRaises( exp.InvalidInput, self._plugin.update_network, ctx, id, {'network': {'provider:network_type': 'vlan'}}) # update the Shared attribute self.assertEqual(False, network['shared']) updated_net = self._plugin.update_network( ctx, id, {'network': {'shared': True}}) self.assertEqual(True, 
updated_net['shared']) # Update the description attribute self.assertIsNone(network['description']) updated_net = self._plugin.update_network( ctx, id, {'network': {'description': 'test'}}) self.assertEqual('test', updated_net['description']) # update the port security attribute self.assertEqual(False, network['port_security_enabled']) updated_net = self._plugin.update_network( ctx, id, {'network': {'port_security_enabled': True}}) self.assertEqual(True, updated_net['port_security_enabled']) @mock.patch.object(dvs.DvsManager, 'get_port_group_info') @mock.patch.object(dvs.DvsManager, '_net_id_to_moref') def test_create_and_delete_portgroup_network_invalid_name(self, fake_get_moref, fake_pg_info): fake_pg_info.return_value = {'name': 'fake-different-name'} data = {'network': {'provider:network_type': 'portgroup', 'name': 'fake-name', 'admin_state_up': True}} self.assertRaises(exp.BadRequest, self._plugin.create_network, context.get_admin_context(), data) vmware-nsx-12.0.1/vmware_nsx/tests/unit/extension_drivers/0000775000175100017510000000000013244524600024012 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/extension_drivers/__init__.py0000666000175100017510000000000013244523345026120 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/extension_drivers/test_dns_integration.py0000666000175100017510000001111513244523345030620 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import dns from neutron_lib import context from neutron_lib.plugins import directory from oslo_config import cfg from vmware_nsx.extension_drivers import dns_integration from vmware_nsx.tests.unit.nsx_v import test_plugin as test_v_plugin from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_v3_plugin NETWORK_DOMAIN_NAME = 'net-domain.com.' NEW_NETWORK_DOMAIN_NAME = 'new-net-domain.com.' PORT_DNS_NAME = 'port-dns-name' NEW_PORT_DNS_NAME = 'new-port-dns-name' class NsxDNSIntegrationTestCase(object): _domain = 'domain.com.' dns_integration.DNS_DRIVER = None def test_create_network_dns_domain(self): with self.network(dns_domain=NETWORK_DOMAIN_NAME, arg_list=(dns.DNSDOMAIN,)) as network: self.assertEqual(NETWORK_DOMAIN_NAME, network['network'][dns.DNSDOMAIN]) def test_update_network_dns_domain(self): with self.network(dns_domain=NETWORK_DOMAIN_NAME, arg_list=(dns.DNSDOMAIN,)) as network: update_data = {'network': {dns.DNSDOMAIN: NEW_NETWORK_DOMAIN_NAME}} updated_network = directory.get_plugin().update_network( context.get_admin_context(), network['network']['id'], update_data) self.assertEqual(NEW_NETWORK_DOMAIN_NAME, updated_network[dns.DNSDOMAIN]) def test_create_port_dns_name(self): with self.port(dns_name=PORT_DNS_NAME, arg_list=(dns.DNSNAME,)) as port: port_data = port['port'] dns_assignment = port_data[dns.DNSASSIGNMENT][0] self.assertEqual(PORT_DNS_NAME, port_data[dns.DNSNAME]) self.assertEqual(PORT_DNS_NAME, dns_assignment['hostname']) self.assertEqual(port_data['fixed_ips'][0]['ip_address'], dns_assignment['ip_address']) self.assertEqual(PORT_DNS_NAME + '.' 
+ self._domain, dns_assignment['fqdn']) def test_update_port_dns_name_ip(self): with self.subnet(cidr='10.0.0.0/24') as subnet: fixed_ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}] with self.port(subnet=subnet, fixed_ips=fixed_ips, dns_name=PORT_DNS_NAME, arg_list=(dns.DNSNAME,)) as port: update_data = {'port': { dns.DNSNAME: NEW_PORT_DNS_NAME, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.4'}]}} updated_port = directory.get_plugin().update_port( context.get_admin_context(), port['port']['id'], update_data) dns_assignment = updated_port[dns.DNSASSIGNMENT][0] self.assertEqual(NEW_PORT_DNS_NAME, updated_port[dns.DNSNAME]) self.assertEqual(NEW_PORT_DNS_NAME, dns_assignment['hostname']) self.assertEqual(updated_port['fixed_ips'][0]['ip_address'], dns_assignment['ip_address']) self.assertEqual(NEW_PORT_DNS_NAME + '.' + self._domain, dns_assignment['fqdn']) class NsxVDNSIntegrationTestCase(NsxDNSIntegrationTestCase, test_v_plugin.NsxVPluginV2TestCase): def setUp(self): cfg.CONF.set_override('nsx_extension_drivers', ['vmware_nsxv_dns']) cfg.CONF.set_override('dns_domain', self._domain) super(NsxVDNSIntegrationTestCase, self).setUp() class NsxV3DNSIntegrationTestCase(NsxDNSIntegrationTestCase, test_v3_plugin.NsxV3PluginTestCaseMixin): def setUp(self): cfg.CONF.set_override('nsx_extension_drivers', ['vmware_nsxv3_dns']) cfg.CONF.set_override('dns_domain', self._domain, 'nsx_v3') super(NsxV3DNSIntegrationTestCase, self).setUp() vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v3/0000775000175100017510000000000013244524600021460 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v3/test_dhcp_metadata.py0000666000175100017510000015171313244523345025666 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import netaddr from oslo_config import cfg from oslo_utils import uuidutils from neutron.extensions import securitygroup as secgrp from neutron_lib.api.definitions import provider_net as pnet from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import advancedserviceproviders as as_providers from vmware_nsx.plugins.nsx_v3 import availability_zones as nsx_az from vmware_nsx.tests.unit.nsx_v3 import test_plugin from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import resources as nsx_resources def set_az_in_config(name, metadata_proxy="metadata_proxy1", dhcp_profile="dhcp_profile1", native_metadata_route="2.2.2.2", dns_domain='aaaa', nameservers=['bbbb']): group_name = 'az:%s' % name cfg.CONF.set_override('availability_zones', [name], group="nsx_v3") config.register_nsxv3_azs(cfg.CONF, [name]) cfg.CONF.set_override("metadata_proxy", metadata_proxy, group=group_name) cfg.CONF.set_override("dhcp_profile", dhcp_profile, group=group_name) cfg.CONF.set_override("native_metadata_route", native_metadata_route, group=group_name) cfg.CONF.set_override("dns_domain", dns_domain, group=group_name) cfg.CONF.set_override("nameservers", nameservers, group=group_name) class NsxNativeDhcpTestCase(test_plugin.NsxV3PluginTestCaseMixin): def setUp(self): 
super(NsxNativeDhcpTestCase, self).setUp() self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification self._orig_native_dhcp_metadata = cfg.CONF.nsx_v3.native_dhcp_metadata cfg.CONF.set_override('dhcp_agent_notification', False) cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') self._az_name = 'zone1' self.az_metadata_route = '3.3.3.3' set_az_in_config(self._az_name, native_metadata_route=self.az_metadata_route) self._patcher = mock.patch.object(nsx_resources.DhcpProfile, 'get') self._patcher.start() # Need to run some plugin init methods manually because plugin was # started before setUp() overrides CONF.nsx_v3.native_dhcp_metadata. self.plugin.init_availability_zones() self.plugin._translate_configured_names_to_uuids() self.plugin._init_dhcp_metadata() def tearDown(self): self._patcher.stop() cfg.CONF.set_override('dhcp_agent_notification', self._orig_dhcp_agent_notification) cfg.CONF.set_override('native_dhcp_metadata', self._orig_native_dhcp_metadata, 'nsx_v3') super(NsxNativeDhcpTestCase, self).tearDown() def _make_subnet_data(self, name=None, network_id=None, cidr=None, gateway_ip=None, tenant_id=None, allocation_pools=None, enable_dhcp=True, dns_nameservers=None, ip_version=4, host_routes=None, shared=False): return {'subnet': { 'name': name, 'network_id': network_id, 'cidr': cidr, 'gateway_ip': gateway_ip, 'tenant_id': tenant_id, 'allocation_pools': allocation_pools, 'ip_version': ip_version, 'enable_dhcp': enable_dhcp, 'dns_nameservers': dns_nameservers, 'host_routes': host_routes, 'shared': shared}} def _verify_dhcp_service(self, network_id, tenant_id, enabled): # Verify if DHCP service is enabled on a network. 
port_res = self._list_ports('json', 200, network_id, tenant_id=tenant_id, device_owner=constants.DEVICE_OWNER_DHCP) port_list = self.deserialize('json', port_res) self.assertEqual(len(port_list['ports']) == 1, enabled) def _verify_dhcp_binding(self, subnet, port_data, update_data, assert_data): # Verify if DHCP binding is updated. with mock.patch( 'vmware_nsxlib.v3.resources.LogicalDhcpServer.update_binding' ) as update_dhcp_binding: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, **port_data) as port: # Retrieve the DHCP binding info created in the DB for the # new port. dhcp_binding = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id'])[0] # Update the port with provided data. self.plugin.update_port( context.get_admin_context(), port['port']['id'], update_data) binding_data = {'mac_address': port['port']['mac_address'], 'ip_address': port['port']['fixed_ips'][0][ 'ip_address']} # Extend basic binding data with to-be-asserted data. binding_data.update(assert_data) # Verify the update call. update_dhcp_binding.assert_called_once_with( dhcp_binding['nsx_service_id'], dhcp_binding['nsx_binding_id'], **binding_data) def test_dhcp_profile_configuration(self): # Test if dhcp_agent_notification and dhcp_profile are # configured correctly. 
orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification cfg.CONF.set_override('dhcp_agent_notification', True) self.assertRaises(nsx_exc.NsxPluginException, self.plugin._init_dhcp_metadata) cfg.CONF.set_override('dhcp_agent_notification', orig_dhcp_agent_notification) orig_dhcp_profile_uuid = cfg.CONF.nsx_v3.dhcp_profile cfg.CONF.set_override('dhcp_profile', '', 'nsx_v3') self.assertRaises(cfg.RequiredOptError, self.plugin._translate_configured_names_to_uuids) cfg.CONF.set_override('dhcp_profile', orig_dhcp_profile_uuid, 'nsx_v3') def test_dhcp_service_with_create_network(self): # Test if DHCP service is disabled on a network when it is created. with self.network() as network: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_delete_dhcp_network(self): # Test if DHCP service is disabled when directly deleting a network # with a DHCP-enabled subnet. with self.network() as network: with self.subnet(network=network, enable_dhcp=True): self.plugin.delete_network(context.get_admin_context(), network['network']['id']) self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_create_non_dhcp_subnet(self): # Test if DHCP service is disabled on a network when a DHCP-disabled # subnet is created. with self.network() as network: with self.subnet(network=network, enable_dhcp=False): self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_create_multiple_non_dhcp_subnets(self): # Test if DHCP service is disabled on a network when multiple # DHCP-disabled subnets are created. 
with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=False): with self.subnet(network=network, cidr='20.0.0.0/24', enable_dhcp=False): self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_create_dhcp_subnet(self): # Test if DHCP service is enabled on a network when a DHCP-enabled # subnet is created. with self.network() as network: with self.subnet(network=network, enable_dhcp=True): self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) def test_dhcp_service_with_create_dhcp_subnet_bulk(self): # Test if DHCP service is enabled on all networks after a # create_subnet_bulk operation. with self.network() as network1, self.network() as network2: subnet1 = self._make_subnet_data( network_id=network1['network']['id'], cidr='10.0.0.0/24', tenant_id=network1['network']['tenant_id']) subnet2 = self._make_subnet_data( network_id=network2['network']['id'], cidr='20.0.0.0/24', tenant_id=network2['network']['tenant_id']) subnets = {'subnets': [subnet1, subnet2]} with mock.patch.object(self.plugin, '_post_create_subnet' ) as post_create_subnet: self.plugin.create_subnet_bulk( context.get_admin_context(), subnets) # Check if post_create function has been called for # both subnets. self.assertEqual(len(subnets['subnets']), post_create_subnet.call_count) # Check if the bindings to backend DHCP entries are created. dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, network1['network']['id'], nsx_constants.SERVICE_DHCP) self.assertTrue(dhcp_service) dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, network2['network']['id'], nsx_constants.SERVICE_DHCP) self.assertTrue(dhcp_service) def test_dhcp_service_with_create_dhcp_subnet_bulk_failure(self): # Test if user-provided rollback function is invoked when # exception occurred during a create_subnet_bulk operation. 
with self.network() as network1, self.network() as network2: subnet1 = self._make_subnet_data( network_id=network1['network']['id'], cidr='10.0.0.0/24', tenant_id=network1['network']['tenant_id']) subnet2 = self._make_subnet_data( network_id=network2['network']['id'], cidr='20.0.0.0/24', tenant_id=network2['network']['tenant_id']) subnets = {'subnets': [subnet1, subnet2]} # Inject an exception on the second create_subnet call. orig_create_subnet = self.plugin.create_subnet with mock.patch.object(self.plugin, 'create_subnet') as create_subnet: def side_effect(*args, **kwargs): return self._fail_second_call( create_subnet, orig_create_subnet, *args, **kwargs) create_subnet.side_effect = side_effect with mock.patch.object(self.plugin, '_rollback_subnet') as rollback_subnet: try: self.plugin.create_subnet_bulk( context.get_admin_context(), subnets) except Exception: pass # Check if rollback function has been called for # the subnet in the first network. rollback_subnet.assert_called_once_with(mock.ANY, mock.ANY) subnet_arg = rollback_subnet.call_args[0][0] self.assertEqual(network1['network']['id'], subnet_arg['network_id']) # Check if the bindings to backend DHCP entries are removed. dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, network1['network']['id'], nsx_constants.SERVICE_DHCP) self.assertFalse(dhcp_service) dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, network2['network']['id'], nsx_constants.SERVICE_DHCP) self.assertFalse(dhcp_service) def test_dhcp_service_with_create_dhcp_subnet_in_vlan_network(self): # Test if a DHCP-enabled subnet cannot be created in a vlan network. # on nsx version that does not support it povidernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'tzuuid', pnet.SEGMENTATION_ID: 100} with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 
'get_transport_type', return_value='VLAN'),\ mock.patch.object(self.plugin.nsxlib, 'feature_supported', return_value=False),\ self.network(providernet_args=povidernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)) as network: subnet = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.0.0/24', 'enable_dhcp': True}} self.assertRaises( n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), subnet) def test_dhcp_service_with_create_multiple_dhcp_subnets(self): # Test if multiple DHCP-enabled subnets cannot be created in a network. with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=True): subnet = {'subnet': {'network_id': network['network']['id'], 'cidr': '20.0.0.0/24', 'enable_dhcp': True}} self.assertRaises( n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), subnet) def test_dhcp_service_with_delete_dhcp_subnet(self): # Test if DHCP service is disabled on a network when a DHCP-disabled # subnet is deleted. with self.network() as network: with self.subnet(network=network, enable_dhcp=True) as subnet: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) self.plugin.delete_subnet(context.get_admin_context(), subnet['subnet']['id']) self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_update_dhcp_subnet(self): # Test if DHCP service is enabled on a network when a DHCP-disabled # subnet is updated to DHCP-enabled. 
with self.network() as network: with self.subnet(network=network, enable_dhcp=False) as subnet: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) data = {'subnet': {'enable_dhcp': True}} self.plugin.update_subnet(context.get_admin_context(), subnet['subnet']['id'], data) self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) def test_dhcp_service_with_update_multiple_dhcp_subnets(self): # Test if a DHCP-disabled subnet cannot be updated to DHCP-enabled # if a DHCP-enabled subnet already exists in the same network. with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=True): with self.subnet(network=network, cidr='20.0.0.0/24', enable_dhcp=False) as subnet: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) data = {'subnet': {'enable_dhcp': True}} self.assertRaises( n_exc.InvalidInput, self.plugin.update_subnet, context.get_admin_context(), subnet['subnet']['id'], data) def test_dhcp_service_with_update_dhcp_port(self): # Test if DHCP server IP is updated when the corresponding DHCP port # IP is changed. 
with mock.patch.object(nsx_resources.LogicalDhcpServer, 'update') as update_logical_dhcp_server: with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) port = self.plugin.get_port(context.get_admin_context(), dhcp_service['port_id']) old_ip = port['fixed_ips'][0]['ip_address'] new_ip = str(netaddr.IPAddress(old_ip) + 1) data = {'port': {'fixed_ips': [ {'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} self.plugin.update_port(context.get_admin_context(), dhcp_service['port_id'], data) update_logical_dhcp_server.assert_called_once_with( dhcp_service['nsx_service_id'], server_ip=new_ip) def test_dhcp_binding_with_create_port(self): # Test if DHCP binding is added when a compute port is created. with mock.patch.object(nsx_resources.LogicalDhcpServer, 'create_binding', return_value={"id": uuidutils.generate_uuid()} ) as create_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}]}} create_dhcp_binding.assert_called_once_with( dhcp_service['nsx_service_id'], port['port']['mac_address'], ip, hostname, cfg.CONF.nsx_v3.dhcp_lease_time, options, 
subnet['subnet']['gateway_ip']) def test_dhcp_binding_with_create_port_with_opts(self): # Test if DHCP binding is added when a compute port is created # with extra options. opt_name = 'interface-mtu' opt_code = 26 opt_val = '9000' with mock.patch.object(nsx_resources.LogicalDhcpServer, 'create_binding', return_value={"id": uuidutils.generate_uuid()} ) as create_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() extra_dhcp_opts = [{'opt_name': opt_name, 'opt_value': opt_val}] with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, extra_dhcp_opts=extra_dhcp_opts, arg_list=('extra_dhcp_opts',)) as port: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': opt_code, 'values': [opt_val]}]} create_dhcp_binding.assert_called_once_with( dhcp_service['nsx_service_id'], port['port']['mac_address'], ip, hostname, cfg.CONF.nsx_v3.dhcp_lease_time, options, subnet['subnet']['gateway_ip']) def test_dhcp_binding_with_create_port_with_opts121(self): # Test if DHCP binding is added when a compute port is created # with extra option121. 
with mock.patch.object(nsx_resources.LogicalDhcpServer, 'create_binding', return_value={"id": uuidutils.generate_uuid()} ) as create_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() extra_dhcp_opts = [{'opt_name': 'classless-static-route', 'opt_value': '1.0.0.0/24,1.2.3.4'}] with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, extra_dhcp_opts=extra_dhcp_opts, arg_list=('extra_dhcp_opts',)) as port: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}, {'network': '1.0.0.0/24', 'next_hop': '1.2.3.4'}]}} create_dhcp_binding.assert_called_once_with( dhcp_service['nsx_service_id'], port['port']['mac_address'], ip, hostname, cfg.CONF.nsx_v3.dhcp_lease_time, options, subnet['subnet']['gateway_ip']) def test_dhcp_binding_with_create_port_with_bad_opts(self): with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() ctx = context.get_admin_context() # Use illegal opt-name extra_dhcp_opts = [{'opt_name': 'Dummy', 'opt_value': 'Dummy'}] data = {'port': { 'name': 'dummy', 'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'device_owner': device_owner, 'device_id': device_id, 'extra_dhcp_opts': extra_dhcp_opts, 'admin_state_up': True, 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01', }} 
self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, ctx, data) # Use illegal option121 value extra_dhcp_opts = [{'opt_name': 'classless-static-route', 'opt_value': '1.0.0.0/24,5.5.5.5,cc'}] data['port']['extra_dhcp_opts'] = extra_dhcp_opts self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, ctx, data) def test_dhcp_binding_with_disable_enable_dhcp(self): # Test if DHCP binding is preserved after DHCP is disabled and # re-enabled on a subnet. with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: ip = port['port']['fixed_ips'][0]['ip_address'] dhcp_bindings = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id']) dhcp_service = dhcp_bindings[0]['nsx_service_id'] self.assertEqual(1, len(dhcp_bindings)) self.assertEqual(ip, dhcp_bindings[0]['ip_address']) # Disable DHCP on subnet. data = {'subnet': {'enable_dhcp': False}} self.plugin.update_subnet(context.get_admin_context(), subnet['subnet']['id'], data) dhcp_bindings = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id']) self.assertEqual([], dhcp_bindings) # Re-enable DHCP on subnet. data = {'subnet': {'enable_dhcp': True}} self.plugin.update_subnet(context.get_admin_context(), subnet['subnet']['id'], data) dhcp_bindings = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id']) self.assertEqual(1, len(dhcp_bindings)) self.assertEqual(ip, dhcp_bindings[0]['ip_address']) # The DHCP service ID should be different because a new # logical DHCP server is created for re-enabling DHCP. self.assertNotEqual(dhcp_service, dhcp_bindings[0]['nsx_service_id']) def test_dhcp_binding_with_delete_port(self): # Test if DHCP binding is removed when the associated compute port # is deleted. 
with mock.patch.object(nsx_resources.LogicalDhcpServer, 'delete_binding') as delete_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: dhcp_binding = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id'])[0] self.plugin.delete_port( context.get_admin_context(), port['port']['id']) delete_dhcp_binding.assert_called_once_with( dhcp_binding['nsx_service_id'], dhcp_binding['nsx_binding_id']) def test_dhcp_binding_with_update_port_delete_ip(self): # Test if DHCP binding is deleted when the IP of the associated # compute port is deleted. with mock.patch.object(nsx_resources.LogicalDhcpServer, 'delete_binding') as delete_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: dhcp_binding = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id'])[0] data = {'port': {'fixed_ips': [], 'admin_state_up': False, secgrp.SECURITYGROUPS: []}} self.plugin.update_port( context.get_admin_context(), port['port']['id'], data) delete_dhcp_binding.assert_called_once_with( dhcp_binding['nsx_service_id'], dhcp_binding['nsx_binding_id']) def test_dhcp_binding_with_update_port_ip(self): # Test if DHCP binding is updated when the IP of the associated # compute port is changed. 
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: port_data = {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}]} new_ip = '10.0.0.4' update_data = {'port': {'fixed_ips': [ {'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} assert_data = {'host_name': 'host-%s' % new_ip.replace('.', '-'), 'ip_address': new_ip, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': new_ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_dhcp_binding_with_update_port_mac(self): # Test if DHCP binding is updated when the Mac of the associated # compute port is changed. with self.subnet(enable_dhcp=True) as subnet: port_data = {'mac_address': '11:22:33:44:55:66'} new_mac = '22:33:44:55:66:77' update_data = {'port': {'mac_address': new_mac}} assert_data = {'mac_address': new_mac, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': mock.ANY}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_dhcp_binding_with_update_port_mac_ip(self): # Test if DHCP binding is updated when the IP and Mac of the associated # compute port are changed at the same time. 
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: port_data = {'mac_address': '11:22:33:44:55:66', 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}]} new_mac = '22:33:44:55:66:77' new_ip = '10.0.0.4' update_data = {'port': {'mac_address': new_mac, 'fixed_ips': [ {'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} assert_data = {'host_name': 'host-%s' % new_ip.replace('.', '-'), 'mac_address': new_mac, 'ip_address': new_ip, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': new_ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_update_port_with_update_dhcp_opt(self): # Test updating extra-dhcp-opts via port update. with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: mac_address = '11:22:33:44:55:66' ip_addr = '10.0.0.3' port_data = {'arg_list': ('extra_dhcp_opts',), 'mac_address': mac_address, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_addr}], 'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': '9000'}]} update_data = {'port': {'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': '9002'}]}} assert_data = {'mac_address': mac_address, 'ip_address': ip_addr, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip_addr}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': 26, 'values': ['9002']}]}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def 
test_update_port_with_adding_dhcp_opt(self): # Test adding extra-dhcp-opts via port update. with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: mac_address = '11:22:33:44:55:66' ip_addr = '10.0.0.3' port_data = {'arg_list': ('extra_dhcp_opts',), 'mac_address': mac_address, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_addr}], 'extra_dhcp_opts': [ {'opt_name': 'nis-domain', 'opt_value': 'abc'}]} update_data = {'port': {'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': '9002'}]}} assert_data = {'mac_address': mac_address, 'ip_address': ip_addr, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip_addr}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': 26, 'values': ['9002']}, {'code': 40, 'values': ['abc']}]}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_update_port_with_deleting_dhcp_opt(self): # Test adding extra-dhcp-opts via port update. 
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: mac_address = '11:22:33:44:55:66' ip_addr = '10.0.0.3' port_data = {'arg_list': ('extra_dhcp_opts',), 'mac_address': mac_address, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_addr}], 'extra_dhcp_opts': [ {'opt_name': 'nis-domain', 'opt_value': 'abc'}, {'opt_name': 'interface-mtu', 'opt_value': '9002'}]} update_data = {'port': {'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': None}]}} assert_data = {'mac_address': mac_address, 'ip_address': ip_addr, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip_addr}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': 40, 'values': ['abc']}]}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_dhcp_binding_with_update_port_name(self): # Test if DHCP binding is not updated when the name of the associated # compute port is changed. 
with mock.patch.object(nsx_resources.LogicalDhcpServer, 'update_binding') as update_dhcp_binding: with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, name='abc') as port: data = {'port': {'name': 'xyz'}} self.plugin.update_port( context.get_admin_context(), port['port']['id'], data) update_dhcp_binding.assert_not_called() def test_create_network_with_bad_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-az', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False, 'availability_zone_hints': ['bad_hint'] }} self.assertRaises(n_exc.NeutronException, p.create_network, ctx, data) def test_create_network_with_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-az', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False, 'availability_zone_hints': [self._az_name] }} # network creation should succeed net = p.create_network(ctx, data) self.assertEqual([self._az_name], net['availability_zone_hints']) self.assertEqual([self._az_name], net['availability_zones']) def test_create_network_with_no_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-az', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False }} # network creation should succeed net = p.create_network(ctx, data) self.assertEqual([], net['availability_zone_hints']) self.assertEqual([nsx_az.DEFAULT_NAME], net['availability_zones']) def test_dhcp_service_with_create_az_network(self): # Test if DHCP service is disabled on a network when it is created. 
with self.network(availability_zone_hints=[self._az_name], arg_list=('availability_zone_hints',)) as network: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_binding_with_create_az_port(self): # Test if DHCP binding is added when a compute port is created. with mock.patch.object(nsx_resources.LogicalDhcpServer, 'create_binding', return_value={"id": uuidutils.generate_uuid()} ) as create_dhcp_binding: with self.network( availability_zone_hints=[self._az_name], arg_list=('availability_zone_hints',)) as network: with self.subnet(enable_dhcp=True, network=network) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % self.az_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % self.az_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}]}} create_dhcp_binding.assert_called_once_with( dhcp_service['nsx_service_id'], port['port']['mac_address'], ip, hostname, cfg.CONF.nsx_v3.dhcp_lease_time, options, subnet['subnet']['gateway_ip']) def test_create_subnet_with_dhcp_port(self): with self.subnet(enable_dhcp=True) as subnet: # find the dhcp port and verify it has port security disabled ports = self.plugin.get_ports( context.get_admin_context()) self.assertEqual(1, len(ports)) self.assertEqual('network:dhcp', ports[0]['device_owner']) self.assertEqual(subnet['subnet']['network_id'], ports[0]['network_id']) self.assertEqual(False, ports[0]['port_security_enabled']) class 
NsxNativeMetadataTestCase(test_plugin.NsxV3PluginTestCaseMixin): def setUp(self): super(NsxNativeMetadataTestCase, self).setUp() self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification self._orig_native_dhcp_metadata = cfg.CONF.nsx_v3.native_dhcp_metadata cfg.CONF.set_override('dhcp_agent_notification', False) cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') self._az_name = 'zone1' self._az_metadata_proxy = 'dummy' set_az_in_config(self._az_name, metadata_proxy=self._az_metadata_proxy) self._patcher = mock.patch.object(nsx_resources.MetaDataProxy, 'get') self._patcher.start() self.plugin.init_availability_zones() self.plugin._translate_configured_names_to_uuids() self.plugin._init_dhcp_metadata() def tearDown(self): self._patcher.stop() cfg.CONF.set_override('dhcp_agent_notification', self._orig_dhcp_agent_notification) cfg.CONF.set_override('native_dhcp_metadata', self._orig_native_dhcp_metadata, 'nsx_v3') super(NsxNativeMetadataTestCase, self).tearDown() def test_metadata_proxy_configuration(self): # Test if dhcp_agent_notification and metadata_proxy are # configured correctly. orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification cfg.CONF.set_override('dhcp_agent_notification', True) self.assertRaises(nsx_exc.NsxPluginException, self.plugin._init_dhcp_metadata) cfg.CONF.set_override('dhcp_agent_notification', orig_dhcp_agent_notification) orig_metadata_proxy_uuid = cfg.CONF.nsx_v3.metadata_proxy cfg.CONF.set_override('metadata_proxy', '', 'nsx_v3') self.assertRaises(cfg.RequiredOptError, self.plugin._translate_configured_names_to_uuids) cfg.CONF.set_override('metadata_proxy', orig_metadata_proxy_uuid, 'nsx_v3') def test_metadata_proxy_with_create_network(self): # Test if native metadata proxy is enabled on a network when it is # created. 
with mock.patch.object(nsx_resources.LogicalPort, 'create') as create_logical_port: with self.network() as network: nsx_net_id = self.plugin._get_network_nsx_id( context.get_admin_context(), network['network']['id']) tags = self.plugin.nsxlib.build_v3_tags_payload( network['network'], resource_type='os-neutron-net-id', project_name=None) name = utils.get_name_and_uuid('%s-%s' % ( 'mdproxy', network['network']['name'] or 'network'), network['network']['id']) create_logical_port.assert_called_once_with( nsx_net_id, cfg.CONF.nsx_v3.metadata_proxy, tags=tags, name=name, attachment_type=nsx_constants.ATTACHMENT_MDPROXY) def test_metadata_proxy_with_create_az_network(self): # Test if native metadata proxy is enabled on a network when it is # created. with mock.patch.object(nsx_resources.LogicalPort, 'create') as create_logical_port: with self.network( availability_zone_hints=[self._az_name], arg_list=('availability_zone_hints',)) as network: nsx_net_id = self.plugin._get_network_nsx_id( context.get_admin_context(), network['network']['id']) tags = self.plugin.nsxlib.build_v3_tags_payload( network['network'], resource_type='os-neutron-net-id', project_name=None) name = utils.get_name_and_uuid('%s-%s' % ( 'mdproxy', network['network']['name'] or 'network'), network['network']['id']) create_logical_port.assert_called_once_with( nsx_net_id, self._az_metadata_proxy, tags=tags, name=name, attachment_type=nsx_constants.ATTACHMENT_MDPROXY) def test_metadata_proxy_with_get_subnets(self): # Test if get_subnets() handles advanced-service-provider extension, # which is used when processing metadata requests. with self.network() as n1, self.network() as n2: with self.subnet(network=n1) as s1, self.subnet(network=n2) as s2: # Get all the subnets. 
subnets = self._list('subnets')['subnets'] self.assertEqual(len(subnets), 2) self.assertEqual(set([s['id'] for s in subnets]), set([s1['subnet']['id'], s2['subnet']['id']])) lswitch_id = nsx_db.get_nsx_switch_ids( context.get_admin_context().session, n1['network']['id'])[0] # Get only the subnets associated with a particular advanced # service provider (i.e. logical switch). subnets = self._list('subnets', query_params='%s=%s' % (as_providers.ADV_SERVICE_PROVIDERS, lswitch_id))['subnets'] self.assertEqual(len(subnets), 1) self.assertEqual(subnets[0]['id'], s1['subnet']['id']) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v3/test_fwaas_v1_driver.py0000666000175100017510000003646013244523345026173 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock from neutron_lib.plugins import directory from vmware_nsx.services.fwaas.nsx_v3 import edge_fwaas_driver_base from vmware_nsx.services.fwaas.nsx_v3 import edge_fwaas_driver_v1 as \ edge_fwaas_driver from vmware_nsx.services.fwaas.nsx_v3 import fwaas_callbacks_v1 from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_v3_plugin from vmware_nsxlib.v3 import nsx_constants as consts FAKE_FW_ID = 'fake_fw_uuid' FAKE_ROUTER_ID = 'fake_rtr_uuid' MOCK_NSX_ID = 'nsx_router_id' FAKE_PORT_ID = 'fake_port_uuid' FAKE_NET_ID = 'fake_net_uuid' FAKE_NSX_PORT_ID = 'fake_nsx_port_uuid' MOCK_DEFAULT_RULE_ID = 'nsx_default_rule_id' MOCK_SECTION_ID = 'sec_id' DEFAULT_RULE = {'is_default': True, 'display_name': edge_fwaas_driver_base.DEFAULT_RULE_NAME, 'id': MOCK_DEFAULT_RULE_ID, 'action': consts.FW_ACTION_DROP} class Nsxv3FwaasTestCase(test_v3_plugin.NsxV3PluginTestCaseMixin): def setUp(self): super(Nsxv3FwaasTestCase, self).setUp() self.firewall = edge_fwaas_driver.EdgeFwaasV3DriverV1() # Start some nsxlib/DB mocks mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter." "get_firewall_section_id", return_value=MOCK_SECTION_ID).start() mock.patch( "vmware_nsxlib.v3.security.NsxLibFirewallSection." 
"get_default_rule", return_value={'id': MOCK_DEFAULT_RULE_ID}).start() mock.patch( "vmware_nsx.db.db.get_nsx_router_id", return_value=MOCK_NSX_ID).start() self.plugin = directory.get_plugin() self.plugin.fwaas_callbacks = fwaas_callbacks_v1.\ Nsxv3FwaasCallbacksV1() self.plugin.fwaas_callbacks.fwaas_enabled = True self.plugin.fwaas_callbacks.fwaas_driver = self.firewall self.plugin.fwaas_callbacks.internal_driver = self.firewall self.plugin.init_is_complete = True def _default_rule(self, drop=True): rule = DEFAULT_RULE if drop: rule['action'] = consts.FW_ACTION_DROP else: rule['action'] = consts.FW_ACTION_ALLOW return rule def _fake_rules_v4(self): rule1 = {'enabled': True, 'action': 'allow', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '80', 'source_ip_address': '10.24.4.2', 'id': 'fake-fw-rule1', 'description': 'first rule'} rule2 = {'enabled': True, 'action': 'reject', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '22:24', 'source_port': '1:65535', 'id': 'fake-fw-rule2'} rule3 = {'enabled': True, 'action': 'deny', 'ip_version': 4, 'protocol': 'icmp', 'id': 'fake-fw-rule3'} rule4 = {'enabled': True, 'action': 'deny', 'ip_version': 4, 'source_ip_address': '10.25.5.2', 'id': 'fake-fw-rule4'} return [rule1, rule2, rule3, rule4] def _fake_translated_rules(self, logged=False): # The expected translation of the rules in _fake_rules_v4 service1 = {'l4_protocol': 'TCP', 'resource_type': 'L4PortSetNSService', 'destination_ports': ['80'], 'source_ports': []} rule1 = {'action': 'ALLOW', 'services': [{'service': service1}], 'sources': [{'target_id': '10.24.4.2', 'target_type': 'IPv4Address'}], 'display_name': 'Fwaas-fake-fw-rule1', 'notes': 'first rule'} service2 = {'l4_protocol': 'TCP', 'resource_type': 'L4PortSetNSService', 'destination_ports': ['22-24'], 'source_ports': ['1-65535']} rule2 = {'action': 'DROP', # Reject is replaced with deny 'services': [{'service': service2}], 'display_name': 'Fwaas-fake-fw-rule2'} service3_1 = {'resource_type': 
'ICMPTypeNSService', 'protocol': 'ICMPv4'} service3_2 = {'resource_type': 'ICMPTypeNSService', 'protocol': 'ICMPv6'} rule3 = {'action': 'DROP', # icmp is translated to icmp v4 & v6 'services': [{'service': service3_1}, {'service': service3_2}], 'display_name': 'Fwaas-fake-fw-rule3'} rule4 = {'action': 'DROP', 'sources': [{'target_id': '10.25.5.2', 'target_type': 'IPv4Address'}], 'display_name': 'Fwaas-fake-fw-rule4'} if logged: for rule in (rule1, rule2, rule3, rule4): rule['logged'] = logged return [rule1, rule2, rule3, rule4] def _fake_firewall_no_rule(self): rule_list = [] fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': True, 'tenant_id': 'tenant-uuid', 'firewall_rule_list': rule_list} return fw_inst def _fake_firewall(self, rule_list): _rule_list = copy.deepcopy(rule_list) for rule in _rule_list: rule['position'] = str(_rule_list.index(rule)) fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': True, 'tenant_id': 'tenant-uuid', 'firewall_rule_list': _rule_list} return fw_inst def _fake_firewall_with_admin_down(self, rule_list): fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': False, 'tenant_id': 'tenant-uuid', 'firewall_rule_list': rule_list} return fw_inst def _fake_apply_list(self, router_count=1): apply_list = [] while router_count > 0: router_inst = {'id': FAKE_ROUTER_ID} router_info_inst = mock.Mock() router_info_inst.router = router_inst apply_list.append(router_info_inst) router_count -= 1 return apply_list def _setup_firewall_with_rules(self, func, router_count=1): apply_list = self._fake_apply_list(router_count=router_count) rule_list = self._fake_rules_v4() firewall = self._fake_firewall(rule_list) with mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." 
"update") as update_fw, \ mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[]), \ mock.patch.object(self.plugin, 'get_ports', return_value=[]), \ mock.patch.object(self.plugin, 'get_router', return_value=apply_list[0]), \ mock.patch.object(self.plugin.fwaas_callbacks, '_get_router_firewall_id', return_value=firewall['id']), \ mock.patch.object(self.plugin.fwaas_callbacks, '_get_fw_from_plugin', return_value=firewall): func('nsx', apply_list, firewall) self.assertEqual(router_count, update_fw.call_count) update_fw.assert_called_with( MOCK_SECTION_ID, rules=self._fake_translated_rules() + [self._default_rule()]) def test_create_firewall_no_rules(self): apply_list = self._fake_apply_list() firewall = self._fake_firewall_no_rule() initial_tags = [{'scope': 'xxx', 'tag': 'yyy'}] with mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." "update") as update_fw,\ mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[]), \ mock.patch.object(self.plugin, 'get_ports', return_value=[]), \ mock.patch.object(self.plugin, 'get_router', return_value=apply_list[0]), \ mock.patch.object(self.plugin.fwaas_callbacks, '_get_router_firewall_id', return_value=firewall['id']), \ mock.patch.object(self.plugin.fwaas_callbacks, '_get_fw_from_plugin', return_value=firewall), \ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter." "update") as update_rtr,\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter." 
"get", return_value={'tags': initial_tags}) as get_rtr: self.firewall.create_firewall('nsx', apply_list, firewall) update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=[self._default_rule()]) get_rtr.assert_called_once_with(MOCK_NSX_ID) expected_tags = initial_tags expected_tags.append({'scope': edge_fwaas_driver.NSX_FW_TAG, 'tag': firewall['id']}) update_rtr.assert_called_once_with(MOCK_NSX_ID, tags=expected_tags) def test_create_firewall_with_rules(self): self._setup_firewall_with_rules(self.firewall.create_firewall) def test_create_firewall_with_rules_two_routers(self): self._setup_firewall_with_rules(self.firewall.create_firewall, router_count=2) def test_update_firewall_with_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall) def test_delete_firewall(self): apply_list = self._fake_apply_list() firewall = self._fake_firewall_no_rule() initial_tags = [{'scope': 'xxx', 'tag': 'yyy'}, {'scope': edge_fwaas_driver.NSX_FW_TAG, 'tag': firewall['id']}] with mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." "update") as update_fw,\ mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[]), \ mock.patch.object(self.plugin, 'get_router', return_value=apply_list[0]), \ mock.patch.object(self.plugin.fwaas_callbacks, '_get_router_firewall_id', return_value=None), \ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter." "update") as update_rtr,\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter." 
"get", return_value={'tags': initial_tags}) as get_rtr: self.firewall.delete_firewall('nsx', apply_list, firewall) update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=[self._default_rule(drop=False)]) get_rtr.assert_called_once_with(MOCK_NSX_ID) expected_tags = initial_tags expected_tags.pop() expected_tags.append({'scope': edge_fwaas_driver.NSX_FW_TAG, 'tag': firewall['id']}) update_rtr.assert_called_once_with(MOCK_NSX_ID, tags=expected_tags) def test_create_firewall_with_admin_down(self): apply_list = self._fake_apply_list() rule_list = self._fake_rules_v4() firewall = self._fake_firewall_with_admin_down(rule_list) with mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." "update") as update_fw, \ mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[]), \ mock.patch.object(self.plugin, 'get_ports', return_value=[]), \ mock.patch.object(self.plugin, 'get_router', return_value=apply_list[0]), \ mock.patch.object(self.plugin.fwaas_callbacks, '_get_router_firewall_id', return_value=firewall['id']), \ mock.patch.object(self.plugin.fwaas_callbacks, '_get_fw_from_plugin', return_value=firewall): self.firewall.create_firewall('nsx', apply_list, firewall) update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=[self._default_rule()]) def test_create_firewall_with_dhcp_relay(self): apply_list = self._fake_apply_list() firewall = self._fake_firewall_no_rule() relay_server = '1.1.1.1' port = {'id': FAKE_PORT_ID, 'network_id': FAKE_NET_ID} with mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." 
"update") as update_fw,\ mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[port]), \ mock.patch.object(self.plugin, 'get_ports', return_value=[port]), \ mock.patch.object(self.plugin, 'get_router', return_value=apply_list[0]), \ mock.patch.object(self.plugin, '_get_port_relay_servers', return_value=[relay_server]),\ mock.patch.object(self.plugin.fwaas_callbacks, '_get_router_firewall_id', return_value=firewall['id']), \ mock.patch.object(self.plugin.fwaas_callbacks, '_get_fw_from_plugin', return_value=firewall): self.firewall.create_firewall('nsx', apply_list, firewall) # expecting 2 allow rules for the relay servers + default rule expected_rules = expected_rules = [ {'display_name': "DHCP Relay ingress traffic", 'action': consts.FW_ACTION_ALLOW, 'destinations': None, 'sources': [{'target_id': relay_server, 'target_type': 'IPv4Address'}], 'services': self.plugin._get_port_relay_services(), 'direction': 'IN'}, {'display_name': "DHCP Relay egress traffic", 'action': consts.FW_ACTION_ALLOW, 'sources': None, 'destinations': [{'target_id': relay_server, 'target_type': 'IPv4Address'}], 'services': self.plugin._get_port_relay_services(), 'direction': 'OUT'}, self._default_rule() ] update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=expected_rules) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v3/test_constants.py0000666000175100017510000000120413244523345025111 0ustar zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. PLUGIN_NAME = 'vmware_nsx.plugins.nsx_v3.plugin.NsxV3Plugin' vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v3/__init__.py0000666000175100017510000000000013244523345023566 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v3/test_client_cert.py0000666000175100017510000002056113244523345025377 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import mock from oslo_config import cfg from neutron.tests.unit import testlib_api from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.plugins.nsx_v3 import cert_utils from vmware_nsx.plugins.nsx_v3 import utils class NsxV3ClientCertProviderTestCase(testlib_api.SqlTestCase): CERT = "-----BEGIN CERTIFICATE-----\n" \ "MIIDJTCCAg0CBFh36j0wDQYJKoZIhvcNAQELBQAwVzELMAkGA1UEBhMCVVMxEzAR\n" \ "BgNVBAgMCkNhbGlmb3JuaWExDjAMBgNVBAoMBU15T3JnMQ8wDQYDVQQLDAZNeVVu\n" \ "aXQxEjAQBgNVBAMMCW15b3JnLmNvbTAeFw0xNzAxMTIyMDQyMzdaFw0yNzAxMTAy\n" \ "MDQyMzdaMFcxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMQ4wDAYD\n" \ "VQQKDAVNeU9yZzEPMA0GA1UECwwGTXlVbml0MRIwEAYDVQQDDAlteW9yZy5jb20w\n" \ "ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC/wsYintlWVaSeXwaSrdPa\n" \ "+AHtL1ooH7q0uf6tt+6Rwiy10YRjAVJhapj9995gqgJ2402J+3gzNXLCbXjjDR/D\n" \ "9xjAzKHu61r0AVNd9/0+8yXQrEDuzlwHSCKz+zjq5ZEZ7RkLIUdreaZJFPTCwry3\n" \ "wuTnBfqcE7xWl6WfWR8evooV+ZzIfjQdoSliIyn3YGxNN5pc1P40qt0pxOsNBGXG\n" \ "2FIZXpML8TpKw0ga/wE70CJd6tRvSsAADxQXehfKvGtHvlJYS+3cTahC7reQXJnc\n" \ "qsjgYkiWyhhR4jdcTD/tDlVcJroM1jFVxpsCg/AU3srWWWeAGyVe42ZhqWVf0Urz\n" \ "AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAA/lLfmXe8wPyBhN/VMb5bu5Ey56qz+j\n" \ "jCn7tz7FjRvsB9P0fLUDOBKNwyon3yopDNYJ4hnm4yKoHCHURQLZKWHzm0XKzE+4\n" \ "cA/M13M8OEg5otnVVHhz1FPQWnJq7bLHh/KXYcc5Rkc7UeHEPj0sDjfUjCPGdepc\n" \ "Ghu1ZcgHsL4JCuvcadG+RFGeDTug3yO92Fj2uFy5DlzzWOZSi4otpZRd9JZkAtZ1\n" \ "umZRBJ2A504nJx4MplmNqvLNkmxMLKQdvZYNNiYr6icOavDOJA5RhzgoppJZkV2w\n" \ "v2oC+8BFarXnZSk37HAWjwcaqzBLbIyPYpClW5IYMr8LiixSBACc+4w=\n" \ "-----END CERTIFICATE-----\n" PKEY = "-----BEGIN PRIVATE KEY-----\n" \ "MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQC/wsYintlWVaSe\n" \ "XwaSrdPa+AHtL1ooH7q0uf6tt+6Rwiy10YRjAVJhapj9995gqgJ2402J+3gzNXLC\n" \ "bXjjDR/D9xjAzKHu61r0AVNd9/0+8yXQrEDuzlwHSCKz+zjq5ZEZ7RkLIUdreaZJ\n" \ "FPTCwry3wuTnBfqcE7xWl6WfWR8evooV+ZzIfjQdoSliIyn3YGxNN5pc1P40qt0p\n" \ "xOsNBGXG2FIZXpML8TpKw0ga/wE70CJd6tRvSsAADxQXehfKvGtHvlJYS+3cTahC\n" \ 
"7reQXJncqsjgYkiWyhhR4jdcTD/tDlVcJroM1jFVxpsCg/AU3srWWWeAGyVe42Zh\n" \ "qWVf0UrzAgMBAAECggEBAJrGuie9cQy3KZzOdD614RaPMPbhTnKuUYOH0GEk4YFy\n" \ "aaYDS0iiC30njf8HLs10y3JsOuyRNU6X6F24AGe68xW3/pm3UUjHXG0wGLry68wA\n" \ "c1g/gFV/6FXUSnZc4m7uBjUX4yvRm5TK5oV8TaZZifsEar9xWvrZDx4RXpQEWhL0\n" \ "L/TyrOZSfRtBgdWX6Ag4XQVsCfZxJoCi2ZyvaMBsWTH06x9AGo1Io5t1AmA9Hsfb\n" \ "6BsSz186nqb0fq4UMfrWrSCz7M/1s03+hBOVICH2TdaRDZLtDVa1b2x4sFpfdp9t\n" \ "VVxuSHxcmvzOPMIv3NXwj0VitTYYJDBFKoEfx1mzhNkCgYEA59gYyBfpsuCOevP2\n" \ "tn7IeysbtaoKDzHE+ksjs3sAn6Vr2Y0Lbed26NpdIVL6u3HAteJxqrIh0zpkpAtp\n" \ "akdqlj86oRaBUqLXxK3QNpUx19f7KN7UsVAbzUJSOm2n1piPg261ktfhtms2rxnQ\n" \ "+9yluINu+z1wS4FG9SwrRmwwfsUCgYEA072Ma1sj2MER5tmQw1zLANkzP1PAkUdy\n" \ "+oDuJmU9A3/+YSIkm8dGprFglPkLUaf1B15oN6wCJVMpB1lza3PM/YT70rpqc7cq\n" \ "PHJXQlZFMBhyVfIkCv3wICTLD5phhgAWlzlwm094f2uAnbG6WUkrVfZajuh0pW53\n" \ "1i0OTfxAvlcCgYEAkDB2oSM2JhjApDlMbA2HtAqIbkA1h2OlpSDMMFjEd4WTALdW\n" \ "r2CwNHtyRkJsS92gQ750gPvOS6daZifuxLlr0cu7M+piPbmnRdvvzbKWUC40NyP2\n" \ "1dwDnnGr4EjIhI9XWh+lb5EyAJjHZrlAnxOIQawEft6kE2FwdxSkSWUJ+B0CgYEA\n" \ "n2xYDXzRwKGdmPK2zGFRd5IRw9yLYNcq+vGYXdBb4Aa+wOO0LJYd2+Qxk/jvTMvo\n" \ "8WNjlIcuFmxGuAHhpUXLUhaOhFtXS0jdxCVTDd9muI+vhoaKHLyVz53kRhs20m2+\n" \ "lJ3q6wUq9MU8UX8/j3pH5rFV/cOIEAbcs6W4337OQIECgYEAoLtQyqXjH45FlCQx\n" \ "xK8dY+GuxIP+TIwiq23yhu3e+3LIgXJw8DwBFN5yJyH2HMnhGkD4PurEx2sGHeLO\n" \ "EG6L8PNDOxpvSzcgxwmZsUK6j3nAbKycF3PDDXA4kt8WDXBr86OMQsFtpjeO+fGh\n" \ "YWJa+OKc2ExdeMewe9gKIDQ5stw=\n" \ "-----END PRIVATE KEY-----\n" def _init_config(self, storage_type='nsx-db', password=None, cert_file=None): cfg.CONF.set_override('nsx_use_client_auth', True, 'nsx_v3') cfg.CONF.set_override('nsx_client_cert_storage', storage_type, 'nsx_v3') cfg.CONF.set_override('nsx_client_cert_file', cert_file, 'nsx_v3') cfg.CONF.set_override('nsx_client_cert_pk_password', password, 'nsx_v3') # pk password secret is cached - reset it for each test cert_utils.reset_secret() self._provider = utils.get_client_cert_provider() def validate_db_provider(self, 
expected_cert_data): fname = None with self._provider() as p: # verify cert data was exported to CERTFILE fname = p.filename() with open(fname, 'r') as f: actual = f.read() self.assertEqual(expected_cert_data, actual) # after with statement, cert file should be deleted self.assertFalse(os.path.isfile(fname)) def validate_basic_provider(self, expected_cert_data): fname = None with self._provider as p: fname = p.filename() with open(fname, 'r') as f: actual = f.read() self.assertEqual(expected_cert_data, actual) # with statement should not touch the file self.assertTrue(os.path.isfile(fname)) def test_db_provider_without_cert(self): """Verify init fails if no cert is provided in client cert mode""" # certificate not generated - exception should be raised self._init_config() # no certificate in table mock.patch( "vmware_nsx.db.db.get_certificate", return_value=(None, None)).start() self.assertRaises(nsx_exc.ClientCertificateException, self._provider().__enter__) # now verify return to normal after failure mock.patch( "vmware_nsx.db.db.get_certificate", return_value=(self.CERT, self.PKEY)).start() self.validate_db_provider(self.CERT + self.PKEY) def test_db_provider_with_cert(self): """Verify successful certificate load from storage""" self._init_config() mock.patch( "vmware_nsx.db.db.get_certificate", return_value=(self.CERT, self.PKEY)).start() self.validate_db_provider(self.CERT + self.PKEY) def test_db_provider_with_encryption(self): """Verify successful encrypted PK load from storage""" password = 'topsecret' self._init_config(password=password) secret = cert_utils.generate_secret_from_password(password) encrypted_pkey = cert_utils.symmetric_encrypt(secret, self.PKEY) # db should contain encrypted key mock.patch( "vmware_nsx.db.db.get_certificate", return_value=(self.CERT, encrypted_pkey)).start() self.validate_db_provider(self.CERT + self.PKEY) def test_db_provider_with_bad_decrypt(self): """Verify loading plaintext PK from storage fails in encrypt mode""" 
mock.patch( "vmware_nsx.db.db.get_certificate", return_value=(self.CERT, self.PKEY)).start() # after decrypt failure, cert will be deleted mock.patch( "vmware_nsx.db.db.delete_certificate").start() self._init_config(password='topsecret') # since PK in DB is not encrypted, we should fail to decrypt it on load self.assertRaises(nsx_exc.ClientCertificateException, self._provider().__enter__) def test_basic_provider(self): fname = '/tmp/cert.pem' # with basic provider, the file is provided by admin with open(fname, 'w') as f: f.write(self.CERT) f.write(self.PKEY) self._init_config(storage_type='none', cert_file=fname) with self._provider as p: self.assertEqual(fname, p.filename()) self.validate_basic_provider(self.CERT + self.PKEY) os.remove(fname) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v3/test_availability_zones.py0000666000175100017510000001402013244523345026765 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_utils import uuidutils from neutron.tests import base from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.plugins.nsx_v3 import availability_zones as nsx_az class Nsxv3AvailabilityZonesTestCase(base.BaseTestCase): def setUp(self): super(Nsxv3AvailabilityZonesTestCase, self).setUp() self.az_name = "zone1" self.group_name = "az:%s" % self.az_name config.register_nsxv3_azs(cfg.CONF, [self.az_name]) self.global_md_proxy = uuidutils.generate_uuid() cfg.CONF.set_override( "metadata_proxy", self.global_md_proxy, group="nsx_v3") self.global_dhcp_profile = uuidutils.generate_uuid() cfg.CONF.set_override( "dhcp_profile", self.global_dhcp_profile, group="nsx_v3") cfg.CONF.set_override( "native_metadata_route", "1.1.1.1", group="nsx_v3") cfg.CONF.set_override("dns_domain", "xxx.com", group="nsx_v3") cfg.CONF.set_override("nameservers", ["10.1.1.1"], group="nsx_v3") cfg.CONF.set_override("switching_profiles", ["uuid1"], group="nsx_v3") cfg.CONF.set_override("dhcp_relay_service", "service1", group="nsx_v3") def _config_az(self, metadata_proxy="metadata_proxy1", dhcp_profile="dhcp_profile1", native_metadata_route="2.2.2.2", dns_domain="aaa.com", nameservers=["20.1.1.1"], default_overlay_tz='otz', default_vlan_tz='vtz', switching_profiles=["uuid2"], dhcp_relay_service="service2"): if metadata_proxy is not None: cfg.CONF.set_override("metadata_proxy", metadata_proxy, group=self.group_name) if dhcp_profile is not None: cfg.CONF.set_override("dhcp_profile", dhcp_profile, group=self.group_name) if native_metadata_route is not None: cfg.CONF.set_override("native_metadata_route", native_metadata_route, group=self.group_name) if dns_domain is not None: cfg.CONF.set_override("dns_domain", dns_domain, group=self.group_name) if nameservers is not None: cfg.CONF.set_override("nameservers", nameservers, group=self.group_name) if default_overlay_tz is not None: cfg.CONF.set_override("default_overlay_tz", 
default_overlay_tz, group=self.group_name) if default_vlan_tz is not None: cfg.CONF.set_override("default_vlan_tz", default_vlan_tz, group=self.group_name) if switching_profiles is not None: cfg.CONF.set_override("switching_profiles", switching_profiles, group=self.group_name) if dhcp_relay_service is not None: cfg.CONF.set_override("dhcp_relay_service", dhcp_relay_service, group=self.group_name) def test_simple_availability_zone(self): self._config_az() az = nsx_az.NsxV3AvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) self.assertEqual("metadata_proxy1", az.metadata_proxy) self.assertEqual("dhcp_profile1", az.dhcp_profile) self.assertEqual("2.2.2.2", az.native_metadata_route) self.assertEqual("aaa.com", az.dns_domain) self.assertEqual(["20.1.1.1"], az.nameservers) self.assertEqual("otz", az.default_overlay_tz) self.assertEqual("vtz", az.default_vlan_tz) self.assertEqual(["uuid2"], az.switching_profiles) self.assertEqual("service2", az.dhcp_relay_service) def test_missing_group_section(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxV3AvailabilityZone, "doesnt_exist") def test_availability_zone_missing_metadata_proxy(self): # Mandatory parameter self._config_az(metadata_proxy=None) self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxV3AvailabilityZone, self.az_name) def test_availability_zone_missing_dhcp_profile(self): # Mandatory parameter self._config_az(dhcp_profile=None) self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxV3AvailabilityZone, self.az_name) def test_availability_zone_missing_md_route(self): self._config_az(native_metadata_route=None) az = nsx_az.NsxV3AvailabilityZone(self.az_name) self.assertEqual("1.1.1.1", az.native_metadata_route) def test_availability_zone_missing_dns_domain(self): self._config_az(dns_domain=None) az = nsx_az.NsxV3AvailabilityZone(self.az_name) self.assertEqual("xxx.com", az.dns_domain) def test_availability_zone_missing_nameservers(self): 
self._config_az(nameservers=None) az = nsx_az.NsxV3AvailabilityZone(self.az_name) self.assertEqual(["10.1.1.1"], az.nameservers) def test_availability_zone_missing_profiles(self): self._config_az(switching_profiles=None) az = nsx_az.NsxV3AvailabilityZone(self.az_name) self.assertEqual(["uuid1"], az.switching_profiles) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v3/test_plugin.py0000666000175100017510000024477113244523345024415 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock import netaddr from neutron.db import models_v2 from neutron.extensions import address_scope from neutron.extensions import l3 from neutron.extensions import securitygroup as secgrp from neutron.tests.unit import _test_extension_portbindings as test_bindings from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin from neutron.tests.unit.extensions import test_address_scope from neutron.tests.unit.extensions import test_extra_dhcp_opt as test_dhcpopts from neutron.tests.unit.extensions import test_extraroute as test_ext_route from neutron.tests.unit.extensions import test_l3 as test_l3_plugin from neutron.tests.unit.extensions \ import test_l3_ext_gw_mode as test_ext_gw_mode from neutron.tests.unit.scheduler \ import test_dhcp_agent_scheduler as test_dhcpagent from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import extraroute as xroute_apidef from neutron_lib.api.definitions import l3_ext_gw_mode as l3_egm_apidef from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api.definitions import vlantransparent as vlan_apidef from neutron_lib.callbacks import exceptions as nc_exc from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from oslo_config import cfg from oslo_utils import uuidutils from webob import exc from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import utils from vmware_nsx.plugins.nsx_v3 import plugin as nsx_plugin from vmware_nsx.services.lbaas.nsx_v3 import lb_driver_v2 from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.extensions import test_metadata from vmware_nsxlib.tests.unit.v3 import mocks as nsx_v3_mocks from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.v3 
import exceptions as nsxlib_exc PLUGIN_NAME = 'vmware_nsx.plugin.NsxV3Plugin' NSX_TZ_NAME = 'default transport zone' NSX_DHCP_PROFILE_ID = 'default dhcp profile' NSX_METADATA_PROXY_ID = 'default metadata proxy' NSX_SWITCH_PROFILE = 'dummy switch profile' NSX_DHCP_RELAY_SRV = 'dhcp relay srv' def _mock_create_firewall_rules(*args): # NOTE(arosen): the code in the neutron plugin expects the # neutron rule id as the display_name. rules = args[5] return { 'rules': [ {'display_name': rule['id'], 'id': uuidutils.generate_uuid()} for rule in rules ]} def _return_id_key(*args, **kwargs): return {'id': uuidutils.generate_uuid()} def _mock_nsx_backend_calls(): mock.patch("vmware_nsxlib.v3.client.NSX3Client").start() fake_profile = {'key': 'FakeKey', 'resource_type': 'FakeResource', 'id': uuidutils.generate_uuid()} def _return_id(*args, **kwargs): return uuidutils.generate_uuid() def _return_same(key, *args, **kwargs): return key mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibSwitchingProfile." "find_by_display_name", return_value=[fake_profile] ).start() mock.patch( "vmware_nsxlib.v3.router.RouterLib.validate_tier0").start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibSwitchingProfile." "create_port_mirror_profile", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibBridgeCluster." "get_id_by_name_or_id", return_value=uuidutils.generate_uuid()).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibBridgeEndpoint.create", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.security.NsxLibNsGroup.find_by_display_name", ).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.create", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibDhcpProfile." "get_id_by_name_or_id", return_value=NSX_DHCP_PROFILE_ID).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibDhcpRelayService." 
"get_id_by_name_or_id", return_value=NSX_DHCP_RELAY_SRV).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibMetadataProxy." "get_id_by_name_or_id", side_effect=_return_same).start() mock.patch( "vmware_nsxlib.v3.resources.LogicalPort.create", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter.create", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.resources.LogicalDhcpServer.create", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.resources.LogicalDhcpServer.create_binding", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter." "get_firewall_section_id", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.NsxLib.get_version", return_value='2.2.0').start() mock.patch( "vmware_nsxlib.v3.load_balancer.Service.get_router_lb_service", return_value=None).start() class NsxV3PluginTestCaseMixin(test_plugin.NeutronDbPluginV2TestCase, nsxlib_testcase.NsxClientTestCase): def setup_conf_overrides(self): cfg.CONF.set_override('default_overlay_tz', NSX_TZ_NAME, 'nsx_v3') cfg.CONF.set_override('native_dhcp_metadata', False, 'nsx_v3') cfg.CONF.set_override('dhcp_profile', NSX_DHCP_PROFILE_ID, 'nsx_v3') cfg.CONF.set_override('metadata_proxy', NSX_METADATA_PROXY_ID, 'nsx_v3') cfg.CONF.set_override( 'network_scheduler_driver', 'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler') def mock_plugin_methods(self): # mock unnecessary call which causes spawn mock_process_security_group_logging = mock.patch.object( nsx_plugin.NsxV3Plugin, '_process_security_group_logging') mock_process_security_group_logging.start() def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): self._patchers = [] _mock_nsx_backend_calls() self.setup_conf_overrides() self.mock_plugin_methods() # ignoring the given plugin and use the nsx-v3 one if not plugin.endswith('NsxTVDPlugin'): plugin = PLUGIN_NAME 
super(NsxV3PluginTestCaseMixin, self).setUp(plugin=plugin, ext_mgr=ext_mgr) self.maxDiff = None def tearDown(self): for patcher in self._patchers: patcher.stop() super(NsxV3PluginTestCaseMixin, self).tearDown() def _create_network(self, fmt, name, admin_state_up, arg_list=None, providernet_args=None, set_context=False, tenant_id=None, **kwargs): tenant_id = tenant_id or self._tenant_id data = {'network': {'name': name, 'admin_state_up': admin_state_up, 'tenant_id': tenant_id}} # Fix to allow the router:external attribute and any other # attributes containing a colon to be passed with # a double underscore instead kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items()) if extnet_apidef.EXTERNAL in kwargs: arg_list = (extnet_apidef.EXTERNAL, ) + (arg_list or ()) if providernet_args: kwargs.update(providernet_args) for arg in (('admin_state_up', 'tenant_id', 'shared', 'availability_zone_hints') + (arg_list or ())): # Arg must be present if arg in kwargs: data['network'][arg] = kwargs[arg] network_req = self.new_create_request('networks', data, fmt) if set_context and tenant_id: # create a specific auth context for this request network_req.environ['neutron.context'] = context.Context( '', tenant_id) return network_req.get_response(self.api) def _create_l3_ext_network( self, physical_network=nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID): name = 'l3_ext_net' net_type = utils.NetworkTypes.L3_EXT providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: physical_network} return self.network(name=name, router__external=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) def _save_networks(self, networks): ctx = context.get_admin_context() for network_id in networks: with ctx.session.begin(subtransactions=True): ctx.session.add(models_v2.Network(id=network_id)) def _enable_dhcp_relay(self): # Add the relay service to the config and availability zones cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') 
cfg.CONF.set_override('dhcp_relay_service', NSX_DHCP_RELAY_SRV, 'nsx_v3') mock_nsx_version = mock.patch.object( self.plugin.nsxlib, 'feature_supported', return_value=True) mock_nsx_version.start() self.plugin.init_availability_zones() for az in self.plugin.get_azs_list(): az.translate_configured_names_to_uuids(self.plugin.nsxlib) class TestNetworksV2(test_plugin.TestNetworksV2, NsxV3PluginTestCaseMixin): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): # add vlan transparent to the configuration cfg.CONF.set_override('vlan_transparent', True) super(TestNetworksV2, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def tearDown(self): super(TestNetworksV2, self).tearDown() @mock.patch.object(nsx_plugin.NsxV3Plugin, 'validate_availability_zones') def test_create_network_with_availability_zone(self, mock_validate_az): name = 'net-with-zone' zone = ['zone1'] mock_validate_az.return_value = None with self.network(name=name, availability_zone_hints=zone) as net: az_hints = net['network']['availability_zone_hints'] self.assertListEqual(az_hints, zone) def test_network_failure_rollback(self): cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') self.plugin = directory.get_plugin() with mock.patch.object(self.plugin.nsxlib.logical_port, 'create', side_effect=api_exc.NsxApiException): self.network() ctx = context.get_admin_context() networks = self.plugin.get_networks(ctx) self.assertListEqual([], networks) def test_create_provider_flat_network(self): providernet_args = {pnet.NETWORK_TYPE: 'flat'} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 'create', side_effect=_return_id_key) as nsx_create, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 'delete') as nsx_delete, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 
'get_transport_type', return_value='VLAN'),\ self.network(name='flat_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, )) as net: self.assertEqual('flat', net['network'].get(pnet.NETWORK_TYPE)) # make sure the network is created at the backend nsx_create.assert_called_once() # Delete the network and make sure it is deleted from the backend req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) nsx_delete.assert_called_once() def test_create_provider_flat_network_with_physical_net(self): physical_network = nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID providernet_args = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: physical_network} with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='VLAN'),\ self.network(name='flat_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: self.assertEqual('flat', net['network'].get(pnet.NETWORK_TYPE)) def test_create_provider_flat_network_with_vlan(self): providernet_args = {pnet.NETWORK_TYPE: 'flat', pnet.SEGMENTATION_ID: 11} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='VLAN'): result = self._create_network(fmt='json', name='bad_flat_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID)) data = self.deserialize('json', result) # should fail self.assertEqual('InvalidInput', data['NeutronError']['type']) def test_create_provider_geneve_network(self): providernet_args = {pnet.NETWORK_TYPE: 'geneve'} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 'create', side_effect=_return_id_key) as nsx_create, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 'delete') as nsx_delete, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 
'get_transport_type', return_value='OVERLAY'),\ self.network(name='geneve_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, )) as net: self.assertEqual('geneve', net['network'].get(pnet.NETWORK_TYPE)) # make sure the network is created at the backend nsx_create.assert_called_once() # Delete the network and make sure it is deleted from the backend req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) nsx_delete.assert_called_once() def test_create_provider_geneve_network_with_physical_net(self): physical_network = nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID providernet_args = {pnet.NETWORK_TYPE: 'geneve', pnet.PHYSICAL_NETWORK: physical_network} with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='OVERLAY'),\ self.network(name='geneve_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, )) as net: self.assertEqual('geneve', net['network'].get(pnet.NETWORK_TYPE)) def test_create_provider_geneve_network_with_vlan(self): providernet_args = {pnet.NETWORK_TYPE: 'geneve', pnet.SEGMENTATION_ID: 11} with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='OVERLAY'): result = self._create_network(fmt='json', name='bad_geneve_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID)) data = self.deserialize('json', result) # should fail self.assertEqual('InvalidInput', data['NeutronError']['type']) def test_create_provider_vlan_network(self): providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 11} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 'create', side_effect=_return_id_key) as nsx_create, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 
'delete') as nsx_delete, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='VLAN'),\ self.network(name='vlan_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID)) as net: self.assertEqual('vlan', net['network'].get(pnet.NETWORK_TYPE)) # make sure the network is created at the backend nsx_create.assert_called_once() # Delete the network and make sure it is deleted from the backend req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) nsx_delete.assert_called_once() def test_create_provider_nsx_network(self): physical_network = 'Fake logical switch' providernet_args = {pnet.NETWORK_TYPE: 'nsx-net', pnet.PHYSICAL_NETWORK: physical_network} with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.create', side_effect=nsxlib_exc.ResourceNotFound) as nsx_create, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 'delete') as nsx_delete, \ self.network(name='nsx_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: self.assertEqual('nsx-net', net['network'].get(pnet.NETWORK_TYPE)) self.assertEqual(physical_network, net['network'].get(pnet.PHYSICAL_NETWORK)) # make sure the network is NOT created at the backend nsx_create.assert_not_called() # Delete the network. 
It should NOT deleted from the backend req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) nsx_delete.assert_not_called() def test_create_provider_bad_nsx_network(self): physical_network = 'Bad logical switch' providernet_args = {pnet.NETWORK_TYPE: 'nsx-net', pnet.PHYSICAL_NETWORK: physical_network} with mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get", side_effect=nsxlib_exc.ResourceNotFound): result = self._create_network(fmt='json', name='bad_nsx_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) data = self.deserialize('json', result) # should fail self.assertEqual('InvalidInput', data['NeutronError']['type']) def test_create_ens_network_with_no_port_sec(self): cfg.CONF.set_override('ens_support', True, 'nsx_v3') providernet_args = {psec.PORTSECURITY: False} with mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." "get_host_switch_mode", return_value="ENS"),\ mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get", return_value={'transport_zone_id': 'xxx'}): result = self._create_network(fmt='json', name='ens_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(psec.PORTSECURITY,)) res = self.deserialize('json', result) # should succeed, and net should have port security disabled self.assertFalse(res['network']['port_security_enabled']) def test_create_ens_network_with_port_sec(self): cfg.CONF.set_override('ens_support', True, 'nsx_v3') providernet_args = {psec.PORTSECURITY: True} with mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." 
"get_host_switch_mode", return_value="ENS"),\ mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get", return_value={'transport_zone_id': 'xxx'}): result = self._create_network(fmt='json', name='ens_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(psec.PORTSECURITY,)) res = self.deserialize('json', result) # should fail self.assertEqual('NsxENSPortSecurity', res['NeutronError']['type']) def test_update_ens_network(self): cfg.CONF.set_override('ens_support', True, 'nsx_v3') providernet_args = {psec.PORTSECURITY: False} with mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." "get_host_switch_mode", return_value="ENS"),\ mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get", return_value={'transport_zone_id': 'xxx'}): result = self._create_network(fmt='json', name='ens_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(psec.PORTSECURITY,)) net = self.deserialize('json', result) net_id = net['network']['id'] args = {'network': {psec.PORTSECURITY: True}} req = self.new_update_request('networks', args, net_id, fmt='json') res = self.deserialize('json', req.get_response(self.api)) # should fail self.assertEqual('NsxENSPortSecurity', res['NeutronError']['type']) def test_create_transparent_vlan_network(self): providernet_args = {vlan_apidef.VLANTRANSPARENT: True} with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='OVERLAY'),\ self.network(name='vt_net', providernet_args=providernet_args, arg_list=(vlan_apidef.VLANTRANSPARENT, )) as net: self.assertTrue(net['network'].get(vlan_apidef.VLANTRANSPARENT)) def test_create_provider_vlan_network_with_transparent(self): providernet_args = {pnet.NETWORK_TYPE: 'vlan', vlan_apidef.VLANTRANSPARENT: True} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 
'get_transport_type', return_value='VLAN'): result = self._create_network(fmt='json', name='badvlan_net', admin_state_up=True, providernet_args=providernet_args, arg_list=( pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, vlan_apidef.VLANTRANSPARENT)) data = self.deserialize('json', result) self.assertEqual('vlan', data['network'].get(pnet.NETWORK_TYPE)) class TestSubnetsV2(test_plugin.TestSubnetsV2, NsxV3PluginTestCaseMixin): def test_create_subnet_with_shared_address_space(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '100.64.0.0/16'}} self.assertRaises(n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), data) def test_subnet_update_ipv4_and_ipv6_pd_v6stateless_subnets(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_subnet_update_ipv4_and_ipv6_pd_slaac_subnets(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_subnet_native_dhcp_subnet_enabled(self): cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') with self.network() as network: with mock.patch.object(self.plugin, '_enable_native_dhcp') as enable_dhcp,\ self.subnet(network=network, enable_dhcp=True): # Native dhcp should be set for this subnet self.assertTrue(enable_dhcp.called) def test_subnet_native_dhcp_subnet_disabled(self): cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') with self.network() as network: with mock.patch.object(self.plugin, '_enable_native_dhcp') as enable_dhcp,\ self.subnet(network=network, enable_dhcp=False): # Native dhcp should be set for this subnet self.assertFalse(enable_dhcp.called) def test_subnet_native_dhcp_with_relay(self): """Verify that the relay service is added to the router interface""" self._enable_dhcp_relay() with self.network() as network: with mock.patch.object(self.plugin, '_enable_native_dhcp') as enable_dhcp,\ self.subnet(network=network, enable_dhcp=True): # Native dhcp should not be set for this subnet 
self.assertFalse(enable_dhcp.called) class TestPortsV2(test_plugin.TestPortsV2, NsxV3PluginTestCaseMixin, test_bindings.PortBindingsTestCase, test_bindings.PortBindingsHostTestCaseMixin, test_bindings.PortBindingsVnicTestCaseMixin): VIF_TYPE = portbindings.VIF_TYPE_OVS HAS_PORT_FILTER = True def setUp(self): cfg.CONF.set_override('switching_profiles', [NSX_SWITCH_PROFILE], 'nsx_v3') super(TestPortsV2, self).setUp() self.plugin = directory.get_plugin() self.ctx = context.get_admin_context() def test_update_port_delete_ip(self): # This test case overrides the default because the nsx plugin # implements port_security/security groups and it is not allowed # to remove an ip address from a port unless the security group # is first removed. with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [], secgrp.SECURITYGROUPS: []}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(res['port']['admin_state_up'], data['port']['admin_state_up']) self.assertEqual(res['port']['fixed_ips'], data['port']['fixed_ips']) def test_delete_dhcp_port(self): cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') with self.subnet(): pl = directory.get_plugin() ctx = context.Context(user_id=None, tenant_id=self._tenant_id, is_admin=False) ports = pl.get_ports( ctx, filters={'device_owner': [constants.DEVICE_OWNER_DHCP]}) req = self.new_delete_request('ports', ports[0]['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_fail_create_port_with_ext_net(self): expected_error = 'InvalidInput' with self._create_l3_ext_network() as network: with self.subnet(network=network, cidr='10.0.0.0/24'): device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' res = self._create_port(self.fmt, network['network']['id'], exc.HTTPBadRequest.code, device_owner=device_owner) data = 
self.deserialize(self.fmt, res) self.assertEqual(expected_error, data['NeutronError']['type']) def test_fail_update_port_with_ext_net(self): with self._create_l3_ext_network() as network: with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: with self.port(subnet=subnet) as port: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' data = {'port': {'device_owner': device_owner}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_port_with_qos(self): with self.network() as network: policy_id = uuidutils.generate_uuid() data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'qos_policy_id': policy_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01'} } with mock.patch.object(self.plugin, '_get_qos_profile_id'): port = self.plugin.create_port(self.ctx, data) self.assertEqual(policy_id, port['qos_policy_id']) # Get port should also return the qos policy id with mock.patch('vmware_nsx.services.qos.common.utils.' 
'get_port_policy_id', return_value=policy_id): port = self.plugin.get_port(self.ctx, port['id']) self.assertEqual(policy_id, port['qos_policy_id']) def test_update_port_with_qos(self): with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01'} } port = self.plugin.create_port(self.ctx, data) policy_id = uuidutils.generate_uuid() data['port']['qos_policy_id'] = policy_id with mock.patch.object(self.plugin, '_get_qos_profile_id'): res = self.plugin.update_port(self.ctx, port['id'], data) self.assertEqual(policy_id, res['qos_policy_id']) # Get port should also return the qos policy id with mock.patch('vmware_nsx.services.qos.common.utils.' 'get_port_policy_id', return_value=policy_id): res = self.plugin.get_port(self.ctx, port['id']) self.assertEqual(policy_id, res['qos_policy_id']) def test_create_ext_port_with_qos_fail(self): with self._create_l3_ext_network() as network: with self.subnet(network=network, cidr='10.0.0.0/24'): policy_id = uuidutils.generate_uuid() data = {'port': {'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'qos_policy_id': policy_id}} # Cannot add qos policy to a router port self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, self.ctx, data) def _test_create_illegal_port_with_qos_fail(self, device_owner): with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24'): policy_id = uuidutils.generate_uuid() data = {'port': {'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'device_owner': device_owner, 'qos_policy_id': policy_id}} # Cannot add qos policy to this type of port self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, self.ctx, data) def test_create_router_port_with_qos_fail(self): self._test_create_illegal_port_with_qos_fail( 
'network:router_interface') def test_create_dhcp_port_with_qos_fail(self): self._test_create_illegal_port_with_qos_fail('network:dhcp') def _test_update_illegal_port_with_qos_fail(self, device_owner): with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24'): policy_id = uuidutils.generate_uuid() data = {'port': {'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01', 'device_id': 'dummy', 'device_owner': ''}} port = self.plugin.create_port(self.ctx, data) policy_id = uuidutils.generate_uuid() data['port'] = {'qos_policy_id': policy_id, 'device_owner': device_owner} # Cannot add qos policy to a router interface port self.assertRaises(n_exc.InvalidInput, self.plugin.update_port, self.ctx, port['id'], data) def test_update_router_port_with_qos_fail(self): self._test_update_illegal_port_with_qos_fail( 'network:router_interface') def test_update_dhcp_port_with_qos_fail(self): self._test_update_illegal_port_with_qos_fail('network:dhcp') def test_create_port_with_qos_on_net(self): with self.network() as network: policy_id = uuidutils.generate_uuid() device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': device_owner, 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01'} } with mock.patch.object(self.plugin, '_get_qos_profile_id') as get_profile: with mock.patch('vmware_nsx.services.qos.common.utils.' 
'get_network_policy_id', return_value=policy_id): self.plugin.create_port(self.ctx, data) get_profile.assert_called_once_with(self.ctx, policy_id) def test_update_port_with_qos_on_net(self): with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01'} } port = self.plugin.create_port(self.ctx, data) policy_id = uuidutils.generate_uuid() device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' data['port']['device_owner'] = device_owner with mock.patch.object(self.plugin, '_get_qos_profile_id') as get_profile: with mock.patch('vmware_nsx.services.qos.common.utils.' 'get_network_policy_id', return_value=policy_id): self.plugin.update_port(self.ctx, port['id'], data) get_profile.assert_called_once_with(self.ctx, policy_id) def _get_ports_with_fields(self, tenid, fields, expected_count): pl = directory.get_plugin() ctx = context.Context(user_id=None, tenant_id=tenid, is_admin=False) ports = pl.get_ports(ctx, filters={'tenant_id': [tenid]}, fields=fields) self.assertEqual(expected_count, len(ports)) def test_get_ports_with_fields(self): with self.port(), self.port(), self.port(), self.port() as p: tenid = p['port']['tenant_id'] # get all fields: self._get_ports_with_fields(tenid, None, 4) # get specific fields: self._get_ports_with_fields(tenid, 'mac_address', 4) self._get_ports_with_fields(tenid, 'network_id', 4) def test_port_failure_rollback_dhcp_exception(self): cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') self.plugin = directory.get_plugin() with mock.patch.object(self.plugin, '_add_dhcp_binding', side_effect=nsxlib_exc.ManagerError): self.port() ctx = context.get_admin_context() networks = self.plugin.get_ports(ctx) self.assertListEqual([], networks) def test_update_port_add_additional_ip(self): """Test update of port with additional 
IP fails.""" with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_port_additional_ip(self): """Test that creation of port with additional IP fails.""" with self.subnet() as subnet: data = {'port': {'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]}} port_req = self.new_create_request('ports', data) res = port_req.get_response(self.api) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_port_with_switching_profiles(self): """Tests that nsx ports get the configures switching profiles""" self.plugin = directory.get_plugin() with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'p1', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01'} } with mock.patch.object(self.plugin.nsxlib.logical_port, 'create', return_value={'id': 'fake'}) as nsx_create: self.plugin.create_port(self.ctx, data) expected_prof = self.plugin.get_default_az().\ switching_profiles_objs[0] actual_profs = nsx_create.call_args[1]['switch_profile_ids'] # the ports switching profiles should start with the # configured one self.assertEqual(expected_prof, actual_profs[0]) def test_create_ens_port_with_no_port_sec(self): with self.subnet() as subnet,\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." 
"get_host_switch_mode", return_value="ENS"),\ mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get", return_value={'transport_zone_id': 'xxx'}): args = {'port': {'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}], psec.PORTSECURITY: False}} port_req = self.new_create_request('ports', args) port = self.deserialize(self.fmt, port_req.get_response(self.api)) self.assertFalse(port['port']['port_security_enabled']) def test_create_ens_port_with_port_sec(self): with self.subnet() as subnet,\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." "get_host_switch_mode", return_value="ENS"),\ mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get", return_value={'transport_zone_id': 'xxx'}): args = {'port': {'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}], psec.PORTSECURITY: True}} port_req = self.new_create_request('ports', args) res = self.deserialize('json', port_req.get_response(self.api)) # should fail self.assertEqual('NsxENSPortSecurity', res['NeutronError']['type']) def test_update_ens_port(self): with self.subnet() as subnet,\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." 
"get_host_switch_mode", return_value="ENS"),\ mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get", return_value={'transport_zone_id': 'xxx'}): args = {'port': {'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}], psec.PORTSECURITY: False}} port_req = self.new_create_request('ports', args) port = self.deserialize(self.fmt, port_req.get_response(self.api)) port_id = port['port']['id'] args = {'port': {psec.PORTSECURITY: True}} req = self.new_update_request('ports', args, port_id) res = self.deserialize('json', req.get_response(self.api)) # should fail self.assertEqual('NsxENSPortSecurity', res['NeutronError']['type']) def test_update_port_update_ip_address_only(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_update_port_with_new_ipv6_slaac_subnet_in_fixed_ips(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_update_port_mac_v6_slaac(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_requested_subnet_id_v4_and_v6(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_requested_invalid_fixed_ips(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_requested_subnet_id_v4_and_v6_slaac(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_range_allocation(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_create_port_anticipating_allocation(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_create_compute_port_with_relay_no_router(self): """Compute port creation should fail if a network with dhcp relay is not connected to a router """ self._enable_dhcp_relay() with self.network() as network, \ self.subnet(network=network, enable_dhcp=True) as s1: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' data = {'port': { 'network_id': 
network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': device_owner, 'fixed_ips': [{'subnet_id': s1['subnet']['id']}], 'mac_address': '00:00:00:00:00:01'} } self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, self.ctx, data) class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt, NsxV3PluginTestCaseMixin): def setUp(self, plugin=None): super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp( plugin=PLUGIN_NAME) class NSXv3DHCPAgentAZAwareWeightSchedulerTestCase( test_dhcpagent.DHCPAgentAZAwareWeightSchedulerTestCase, NsxV3PluginTestCaseMixin): def setUp(self): super(NSXv3DHCPAgentAZAwareWeightSchedulerTestCase, self).setUp() self.plugin = directory.get_plugin() self.ctx = context.get_admin_context() def setup_coreplugin(self, core_plugin=None, load_plugins=True): super(NSXv3DHCPAgentAZAwareWeightSchedulerTestCase, self).setup_coreplugin(core_plugin=PLUGIN_NAME, load_plugins=load_plugins) class TestL3ExtensionManager(object): def get_resources(self): # Simulate extension of L3 attribute map l3.L3().update_attributes_map( l3_egm_apidef.RESOURCE_ATTRIBUTE_MAP) l3.L3().update_attributes_map( xroute_apidef.RESOURCE_ATTRIBUTE_MAP) return (l3.L3.get_resources() + address_scope.Address_scope.get_resources()) def get_actions(self): return [] def get_request_extensions(self): return [] class L3NatTest(test_l3_plugin.L3BaseForIntTests, NsxV3PluginTestCaseMixin, test_address_scope.AddressScopeTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) cfg.CONF.set_default('max_routes', 3) ext_mgr = ext_mgr or TestL3ExtensionManager() mock_nsx_version = mock.patch.object(nsx_plugin.utils, 'is_nsx_version_2_0_0', new=lambda v: True) mock_nsx_version.start() # Make sure the LB callback is not called on router deletion self.lb_mock = mock.patch( "vmware_nsx.services.lbaas.nsx_v3.lb_driver_v2." 
"EdgeLoadbalancerDriverV2._check_lb_service_on_router") self.lb_mock.start() super(L3NatTest, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.plugin_instance = directory.get_plugin() self._plugin_name = "%s.%s" % ( self.plugin_instance.__module__, self.plugin_instance.__class__.__name__) self._plugin_class = self.plugin_instance.__class__ self.plugin_instance.fwaas_callbacks = None def test_floatingip_create_different_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_router_add_interface_multiple_ipv4_subnet_port_returns_400(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_router_add_interface_multiple_ipv6_subnet_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_floatingip_update_different_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_create_multiple_floatingips_same_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') class TestL3NatTestCase(L3NatTest, test_l3_plugin.L3NatDBIntTestCase, test_ext_route.ExtraRouteDBTestCaseBase, test_metadata.MetaDataTestCase): block_dhcp_notifier = False def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): super(TestL3NatTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) cfg.CONF.set_override('metadata_mode', None, 'nsx_v3') cfg.CONF.set_override('metadata_on_demand', False, 'nsx_v3') def _test_create_l3_ext_network( self, physical_network=nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID): name = 'l3_ext_net' net_type = utils.NetworkTypes.L3_EXT expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (extnet_apidef.EXTERNAL, True), (pnet.NETWORK_TYPE, net_type), (pnet.PHYSICAL_NETWORK, physical_network)] with self._create_l3_ext_network(physical_network) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def 
test_create_l3_ext_network_with_default_tier0(self): self._test_create_l3_ext_network() def test_floatingip_update(self): super(TestL3NatTestCase, self).test_floatingip_update( expected_status=constants.FLOATINGIP_STATUS_DOWN) def test_floatingip_with_invalid_create_port(self): self._test_floatingip_with_invalid_create_port(self._plugin_name) def test_router_add_interface_dup_subnet2_returns_400(self): self.skipTest('not supported') def test_router_add_interface_ipv6_port_existing_network_returns_400(self): self.skipTest('not supported') def test_routes_update_for_multiple_routers(self): self.skipTest('not supported') def test_floatingip_multi_external_one_internal(self): self.skipTest('not supported') def test_floatingip_same_external_and_internal(self): self.skipTest('not supported') def test_route_update_with_external_route(self): self.skipTest('not supported') def test_floatingip_update_subnet_gateway_disabled(self): self.skipTest('not supported') def test_router_delete_with_lb_service(self): self.lb_mock.stop() # Create the LB object - here the delete callback is registered lb_driver = lb_driver_v2.EdgeLoadbalancerDriverV2() with self.router() as router: with mock.patch('vmware_nsxlib.v3.load_balancer.Service.' 
'get_router_lb_service'): self.assertRaises(nc_exc.CallbackFailure, self.plugin_instance.delete_router, context.get_admin_context(), router['router']['id']) # Unregister callback lb_driver._unsubscribe_router_delete_callback() self.lb_mock.start() def test_multiple_subnets_on_different_routers(self): with self.network() as network: with self.subnet(network=network) as s1,\ self.subnet(network=network, cidr='11.0.0.0/24') as s2,\ self.router() as r1,\ self.router() as r2: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self.assertRaises(n_exc.Conflict, self.plugin_instance.add_router_interface, context.get_admin_context(), r2['router']['id'], {'subnet_id': s2['subnet']['id']}) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s2['subnet']['id'], None) def test_multiple_subnets_on_same_router(self): with self.network() as network: with self.subnet(network=network) as s1,\ self.subnet(network=network, cidr='11.0.0.0/24') as s2,\ self.router() as r1: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self.assertRaises(n_exc.InvalidInput, self.plugin_instance.add_router_interface, context.get_admin_context(), r1['router']['id'], {'subnet_id': s2['subnet']['id']}) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) def test_router_remove_interface_inuse_return_409(self): with self.router() as r1,\ self.subnet() as ext_subnet,\ self.subnet(cidr='11.0.0.0/24') as s1: self._set_net_external(ext_subnet['subnet']['network_id']) self._router_interface_action( 'add', r1['router']['id'], s1['subnet']['id'], None) self._add_external_gateway_to_router( r1['router']['id'], ext_subnet['subnet']['network_id']) with self.port(subnet=s1,) as p: fip_res = self._create_floatingip( self.fmt, 
ext_subnet['subnet']['network_id'], subnet_id=ext_subnet['subnet']['id'], port_id=p['port']['id']) fip = self.deserialize(self.fmt, fip_res) self._router_interface_action( 'remove', r1['router']['id'], s1['subnet']['id'], None, expected_code=exc.HTTPConflict.code) self._delete('floatingips', fip['floatingip']['id']) self._remove_external_gateway_from_router( r1['router']['id'], ext_subnet['subnet']['network_id']) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) def test_router_update_on_external_port(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = body['router']['external_gateway_info']['network_id'] self.assertEqual(net_id, s['subnet']['network_id']) port_res = self._list_ports( 'json', 200, s['subnet']['network_id'], tenant_id=r['router']['tenant_id'], device_owner=constants.DEVICE_OWNER_ROUTER_GW) port_list = self.deserialize('json', port_res) self.assertEqual(len(port_list['ports']), 1) routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}] self.assertRaises(n_exc.InvalidInput, self.plugin_instance.update_router, context.get_admin_context(), r['router']['id'], {'router': {'routes': routes}}) updates = {'admin_state_up': False} self.assertRaises(n_exc.InvalidInput, self.plugin_instance.update_router, context.get_admin_context(), r['router']['id'], {'router': updates}) self._remove_external_gateway_from_router( r['router']['id'], s['subnet']['network_id']) body = self._show('routers', r['router']['id']) gw_info = body['router']['external_gateway_info'] self.assertIsNone(gw_info) def test_create_router_gateway_fails(self): self.skipTest('not supported') def test_router_remove_ipv6_subnet_from_interface(self): self.skipTest('not supported') def 
test_router_add_interface_multiple_ipv6_subnets_same_net(self): self.skipTest('not supported') def test_router_add_interface_multiple_ipv4_subnets(self): self.skipTest('not supported') def test_floatingip_update_to_same_port_id_twice(self): self.skipTest('Plugin changes floating port status') def _test_create_subnetpool(self, prefixes, expected=None, admin=False, **kwargs): keys = kwargs.copy() keys.setdefault('tenant_id', self._tenant_id) with self.subnetpool(prefixes, admin, **keys) as subnetpool: self._validate_resource(subnetpool, keys, 'subnetpool') if expected: self._compare_resource(subnetpool, expected, 'subnetpool') return subnetpool def _update_router_enable_snat(self, router_id, network_id, enable_snat): return self._update('routers', router_id, {'router': {'external_gateway_info': {'network_id': network_id, 'enable_snat': enable_snat}}}) def test_router_no_snat_with_different_address_scope(self): """Test that if the router has no snat, you cannot add an interface from a different address scope than the gateway. 
""" # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self.network() as ext_net: self._set_net_external(ext_net['network']['id']) as_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/24') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp1', min_prefixlen='24', address_scope_id=as_id) subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': ext_net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'enable_dhcp': False, 'tenant_id': ext_net['network']['tenant_id']}} req = self.new_create_request('subnets', data) ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) # create a regular network on another address scope with self.address_scope(name='as2') as addr_scope2, \ self.network() as net: as_id2 = addr_scope2['address_scope']['id'] subnet2 = netaddr.IPNetwork('20.10.10.0/24') subnetpool2 = self._test_create_subnetpool( [subnet2.cidr], name='sp2', min_prefixlen='24', address_scope_id=as_id2) subnetpool_id2 = subnetpool2['subnetpool']['id'] data = {'subnet': { 'network_id': net['network']['id'], 'subnetpool_id': subnetpool_id2, 'ip_version': 4, 'tenant_id': net['network']['tenant_id']}} req = self.new_create_request('subnets', data) int_subnet = self.deserialize( self.fmt, req.get_response(self.api)) # create a no snat router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['subnet']['network_id']) self._update_router_enable_snat( r['router']['id'], ext_subnet['subnet']['network_id'], False) # should fail adding the interface to the router err_code = exc.HTTPBadRequest.code self._router_interface_action('add', r['router']['id'], int_subnet['subnet']['id'], None, err_code) def test_router_no_snat_with_same_address_scope(self): """Test that if the router has no snat, you can add an interface from the same address scope as the gateway. 
""" # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self.network() as ext_net: self._set_net_external(ext_net['network']['id']) as_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/21') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp1', min_prefixlen='24', address_scope_id=as_id) subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': ext_net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'enable_dhcp': False, 'tenant_id': ext_net['network']['tenant_id']}} req = self.new_create_request('subnets', data) ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) # create a regular network on the same address scope with self.network() as net: data = {'subnet': { 'network_id': net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'tenant_id': net['network']['tenant_id']}} req = self.new_create_request('subnets', data) int_subnet = self.deserialize( self.fmt, req.get_response(self.api)) # create a no snat router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['subnet']['network_id']) self._update_router_enable_snat( r['router']['id'], ext_subnet['subnet']['network_id'], False) # should succeed adding the interface to the router self._router_interface_action('add', r['router']['id'], int_subnet['subnet']['id'], None) def _mock_add_snat_rule(self): return mock.patch("vmware_nsxlib.v3.router.RouterLib." "add_gw_snat_rule") def _mock_del_snat_rule(self): return mock.patch("vmware_nsxlib.v3.router.RouterLib." 
"delete_gw_snat_rule_by_source") def _prepare_external_subnet_on_address_scope(self, ext_net, address_scope): self._set_net_external(ext_net['network']['id']) as_id = address_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/21') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp1', min_prefixlen='24', address_scope_id=as_id) subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': ext_net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'enable_dhcp': False, 'tenant_id': ext_net['network']['tenant_id']}} req = self.new_create_request('subnets', data) ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) return ext_subnet['subnet'] def _create_subnet_and_assert_snat_rules(self, subnetpool_id, router_id, assert_snat_deleted=False, assert_snat_added=False): # create a regular network on the given subnet pool with self.network() as net: data = {'subnet': { 'network_id': net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'tenant_id': net['network']['tenant_id']}} req = self.new_create_request('subnets', data) int_subnet = self.deserialize( self.fmt, req.get_response(self.api)) with self._mock_add_snat_rule() as add_nat,\ self._mock_del_snat_rule() as delete_nat: # Add the interface self._router_interface_action( 'add', router_id, int_subnet['subnet']['id'], None) if assert_snat_deleted: delete_nat.assert_called() else: delete_nat.assert_not_called() if assert_snat_added: add_nat.assert_called() else: add_nat.assert_not_called() def test_router_address_scope_snat_rules(self): """Test that if the router interface had the same address scope as the gateway - snat rule is not added. 
""" # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self.network() as ext_net: ext_subnet = self._prepare_external_subnet_on_address_scope( ext_net, addr_scope) # create a router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['network_id']) # create a regular network on same address scope # and verify no snat change as_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('30.10.10.0/24') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp2', min_prefixlen='24', address_scope_id=as_id) as_id = addr_scope['address_scope']['id'] subnetpool_id = subnetpool['subnetpool']['id'] self._create_subnet_and_assert_snat_rules( subnetpool_id, r['router']['id']) # create a regular network on a different address scope # and verify snat rules are added with self.address_scope(name='as2') as addr_scope2: as2_id = addr_scope2['address_scope']['id'] subnet2 = netaddr.IPNetwork('20.10.10.0/24') subnetpool2 = self._test_create_subnetpool( [subnet2.cidr], name='sp2', min_prefixlen='24', address_scope_id=as2_id) subnetpool2_id = subnetpool2['subnetpool']['id'] self._create_subnet_and_assert_snat_rules( subnetpool2_id, r['router']['id'], assert_snat_added=True) def _test_router_address_scope_change(self, change_gw=False): """When subnetpool address scope changes, and router that was originally under same address scope, results having different address scopes, relevant snat rules are added. 
""" # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self.network() as ext_net: ext_subnet = self._prepare_external_subnet_on_address_scope( ext_net, addr_scope) # create a router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['network_id']) # create a regular network on same address scope # and verify no snat change as_id = addr_scope['address_scope']['id'] subnet2 = netaddr.IPNetwork('40.10.10.0/24') subnetpool2 = self._test_create_subnetpool( [subnet2.cidr], name='sp2', min_prefixlen='24', address_scope_id=as_id) subnetpool2_id = subnetpool2['subnetpool']['id'] self._create_subnet_and_assert_snat_rules( subnetpool2_id, r['router']['id']) # change address scope of the first subnetpool with self.address_scope(name='as2') as addr_scope2,\ self._mock_add_snat_rule() as add_nat: as2_id = addr_scope2['address_scope']['id'] data = {'subnetpool': { 'address_scope_id': as2_id}} if change_gw: subnetpool_to_update = ext_subnet['subnetpool_id'] else: subnetpool_to_update = subnetpool2_id req = self.new_update_request('subnetpools', data, subnetpool_to_update) req.get_response(self.api) add_nat.assert_called_once() def test_router_address_scope_change(self): self._test_router_address_scope_change() def test_router_address_scope_gw_change(self): self._test_router_address_scope_change(change_gw=True) def _test_3leg_router_address_scope_change(self, change_gw=False, change_2gw=False): """Test address scope change scenarios with router that covers 3 address scopes """ # create an external network on one address scope with self.address_scope(name='as1') as as1, \ self.address_scope(name='as2') as as2, \ self.address_scope(name='as3') as as3, \ self.network() as ext_net: ext_subnet = self._prepare_external_subnet_on_address_scope( ext_net, as1) as1_id = as1['address_scope']['id'] # create a router with this gateway with self.router() as r: 
self._add_external_gateway_to_router( r['router']['id'], ext_subnet['network_id']) # create a regular network on address scope 2 # and verify snat change as2_id = as2['address_scope']['id'] subnet2 = netaddr.IPNetwork('20.10.10.0/24') subnetpool2 = self._test_create_subnetpool( [subnet2.cidr], name='sp2', min_prefixlen='24', address_scope_id=as2_id) subnetpool2_id = subnetpool2['subnetpool']['id'] self._create_subnet_and_assert_snat_rules( subnetpool2_id, r['router']['id'], assert_snat_added=True) # create a regular network on address scope 3 # verify no snat change as3_id = as3['address_scope']['id'] subnet3 = netaddr.IPNetwork('30.10.10.0/24') subnetpool3 = self._test_create_subnetpool( [subnet3.cidr], name='sp2', min_prefixlen='24', address_scope_id=as3_id) subnetpool3_id = subnetpool3['subnetpool']['id'] self._create_subnet_and_assert_snat_rules( subnetpool3_id, r['router']['id'], assert_snat_added=True) with self._mock_add_snat_rule() as add_nat, \ self._mock_del_snat_rule() as del_nat: if change_gw: # change address scope of GW subnet subnetpool_to_update = ext_subnet['subnetpool_id'] else: subnetpool_to_update = subnetpool2_id if change_2gw: # change subnet2 to be in GW address scope target_as = as1_id else: target_as = as3_id data = {'subnetpool': { 'address_scope_id': target_as}} req = self.new_update_request('subnetpools', data, subnetpool_to_update) req.get_response(self.api) if change_gw: # The test changed address scope of gw subnet. # Both previous rules should be deleted, # and one new rule for subnet2 should be added del_nat.assert_called() self.assertEqual(2, del_nat.call_count) add_nat.assert_called_once() else: if change_2gw: # The test changed address scope of subnet2 to be # same as GW address scope. # Snat rule for as2 will be deleted. No effect on as3 # rule. del_nat.assert_called_once() else: # The test changed address scope of subnet2 to # as3. Affected snat rule should be re-created. 
del_nat.assert_called_once() add_nat.assert_called_once() def test_3leg_router_address_scope_change(self): self._test_3leg_router_address_scope_change() def test_3leg_router_address_scope_change_to_gw(self): self._test_3leg_router_address_scope_change(change_2gw=True) def test_3leg_router_gw_address_scope_change(self): self._test_3leg_router_address_scope_change(change_gw=True) def test_subnetpool_router_address_scope_change_no_effect(self): """When all router interfaces are allocated from same subnetpool, changing address scope on this subnetpool should not affect snat rules. """ # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self.network() as ext_net: ext_subnet = self._prepare_external_subnet_on_address_scope( ext_net, addr_scope) # create a router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['network_id']) # create a regular network on same address scope # and verify no snat change self._create_subnet_and_assert_snat_rules( ext_subnet['subnetpool_id'], r['router']['id']) with self.address_scope(name='as2') as addr_scope2,\ self._mock_add_snat_rule() as add_nat,\ self._mock_del_snat_rule() as delete_nat: as2_id = addr_scope2['address_scope']['id'] # change address scope of the subnetpool data = {'subnetpool': { 'address_scope_id': as2_id}} req = self.new_update_request('subnetpools', data, ext_subnet['subnetpool_id']) req.get_response(self.api) add_nat.assert_not_called() delete_nat.assert_not_called() def test_router_admin_state(self): """It is not allowed to set the router admin-state to down""" with self.router() as r: self._update('routers', r['router']['id'], {'router': {'admin_state_up': False}}, expected_code=exc.HTTPBadRequest.code) def test_router_dhcp_relay_dhcp_enabled(self): """Verify that the relay service is added to the router interface""" self._enable_dhcp_relay() with self.network() as network: with 
mock.patch.object(self.plugin, 'validate_router_dhcp_relay'),\ self.subnet(network=network, enable_dhcp=True) as s1,\ self.router() as r1,\ mock.patch.object(self.plugin.nsxlib.logical_router_port, 'update') as mock_update_port: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) mock_update_port.assert_called_once_with( mock.ANY, relay_service_uuid=NSX_DHCP_RELAY_SRV, subnets=mock.ANY) def test_router_dhcp_relay_dhcp_disabled(self): """Verify that the relay service is not added to the router interface If the subnet do not have enabled dhcp """ self._enable_dhcp_relay() with self.network() as network: with mock.patch.object(self.plugin, 'validate_router_dhcp_relay'),\ self.subnet(network=network, enable_dhcp=False) as s1,\ self.router() as r1,\ mock.patch.object(self.plugin.nsxlib.logical_router_port, 'update') as mock_update_port: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) mock_update_port.assert_called_once_with( mock.ANY, relay_service_uuid=None, subnets=mock.ANY) def test_router_dhcp_relay_no_ipam(self): """Verify that a router cannot be created with relay and no ipam""" # Add the relay service to the config and availability zones self._enable_dhcp_relay() self.assertRaises(n_exc.InvalidInput, self.plugin_instance.create_router, context.get_admin_context(), {'router': {'name': 'rtr'}}) def test_router_add_gateway_no_subnet_forbidden(self): with self.router() as r: with self.network() as n: self._set_net_external(n['network']['id']) self._add_external_gateway_to_router( r['router']['id'], n['network']['id'], expected_code=exc.HTTPBadRequest.code) def test_router_add_gateway_no_subnet(self): self.skipTest('No support for no subnet gateway set') @mock.patch.object(nsx_plugin.NsxV3Plugin, 'validate_availability_zones') def test_create_router_with_availability_zone(self, mock_validate_az): name = 'rtr-with-zone' zone = ['zone1'] mock_validate_az.return_value = None with self.router(name=name, 
availability_zone_hints=zone) as rtr: az_hints = rtr['router']['availability_zone_hints'] self.assertListEqual(zone, az_hints) def _test_route_update_illegal(self, destination): routes = [{'destination': destination, 'nexthop': '10.0.1.3'}] with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: fixed_ip_data = [{'ip_address': '10.0.1.2'}] with self.port(subnet=s, fixed_ips=fixed_ip_data) as p: self._router_interface_action( 'add', r['router']['id'], None, p['port']['id']) self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=400) def test_route_update_illegal(self): self._test_route_update_illegal('0.0.0.0/0') self._test_route_update_illegal('0.0.0.0/16') class ExtGwModeTestCase(test_ext_gw_mode.ExtGwModeIntTestCase, L3NatTest): def test_router_gateway_set_fail_after_port_create(self): self.skipTest("TBD") vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v3/test_fwaas_v2_driver.py0000666000175100017510000004060113244523345026164 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock from neutron_lib.exceptions import firewall_v2 as exceptions from neutron_lib.plugins import directory from vmware_nsx.services.fwaas.nsx_v3 import edge_fwaas_driver_base from vmware_nsx.services.fwaas.nsx_v3 import edge_fwaas_driver_v2 from vmware_nsx.services.fwaas.nsx_v3 import fwaas_callbacks_v2 from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_v3_plugin from vmware_nsxlib.v3 import nsx_constants as consts FAKE_FW_ID = 'fake_fw_uuid' FAKE_ROUTER_ID = 'fake_rtr_uuid' FAKE_PORT_ID = 'fake_port_uuid' FAKE_NET_ID = 'fake_net_uuid' FAKE_NSX_LS_ID = 'fake_nsx_ls_uuid' MOCK_NSX_ID = 'nsx_nsx_router_id' MOCK_DEFAULT_RULE_ID = 'nsx_default_rule_id' MOCK_SECTION_ID = 'sec_id' DEFAULT_RULE = {'is_default': True, 'display_name': edge_fwaas_driver_base.DEFAULT_RULE_NAME, 'id': MOCK_DEFAULT_RULE_ID, 'action': consts.FW_ACTION_DROP} class Nsxv3FwaasTestCase(test_v3_plugin.NsxV3PluginTestCaseMixin): def setUp(self): super(Nsxv3FwaasTestCase, self).setUp() self.firewall = edge_fwaas_driver_v2.EdgeFwaasV3DriverV2() # Start some nsxlib/DB mocks mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter." "get_firewall_section_id", return_value=MOCK_SECTION_ID).start() mock.patch( "vmware_nsxlib.v3.security.NsxLibFirewallSection." 
"get_default_rule", return_value={'id': MOCK_DEFAULT_RULE_ID}).start() mock.patch( "vmware_nsx.db.db.get_nsx_router_id", return_value=MOCK_NSX_ID).start() self.plugin = directory.get_plugin() self.plugin.fwaas_callbacks = fwaas_callbacks_v2.\ Nsxv3FwaasCallbacksV2() self.plugin.fwaas_callbacks.fwaas_enabled = True self.plugin.fwaas_callbacks.fwaas_driver = self.firewall self.plugin.fwaas_callbacks.internal_driver = self.firewall self.plugin.init_is_complete = True def _default_rule(self): rule = DEFAULT_RULE rule['action'] = consts.FW_ACTION_ALLOW return rule def _fake_rules_v4(self, is_ingress=True): rule1 = {'enabled': True, 'action': 'allow', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '80', 'id': 'fake-fw-rule1', 'description': 'first rule'} rule2 = {'enabled': True, 'action': 'reject', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '22:24', 'source_port': '1:65535', 'id': 'fake-fw-rule2'} rule3 = {'enabled': True, 'action': 'deny', 'ip_version': 4, 'protocol': 'icmp', 'id': 'fake-fw-rule3'} rule4 = {'enabled': True, 'action': 'deny', 'ip_version': 4, 'id': 'fake-fw-rule4'} if is_ingress: # source ips are allowed rule1['source_ip_address'] = '10.24.4.2' else: # dest ips are allowed for egress rules rule1['destination_ip_address'] = '10.24.4.2' return [rule1, rule2, rule3, rule4] def _fake_translated_rules(self, nsx_port_id, is_ingress=True, logged=False): # The expected translation of the rules in _fake_rules_v4 service1 = {'l4_protocol': 'TCP', 'resource_type': 'L4PortSetNSService', 'destination_ports': ['80'], 'source_ports': []} rule1 = {'action': 'ALLOW', 'services': [{'service': service1}], 'sources': [{'target_id': '10.24.4.2', 'target_type': 'IPv4Address'}], 'display_name': 'Fwaas-fake-fw-rule1', 'notes': 'first rule'} if not is_ingress: rule1['destinations'] = rule1['sources'] del rule1['sources'] service2 = {'l4_protocol': 'TCP', 'resource_type': 'L4PortSetNSService', 'destination_ports': ['22-24'], 'source_ports': ['1-65535']} 
rule2 = {'action': 'DROP', # Reject is replaced with deny 'services': [{'service': service2}], 'display_name': 'Fwaas-fake-fw-rule2'} service3_1 = {'resource_type': 'ICMPTypeNSService', 'protocol': 'ICMPv4'} service3_2 = {'resource_type': 'ICMPTypeNSService', 'protocol': 'ICMPv6'} rule3 = {'action': 'DROP', # icmp is translated to icmp v4 & v6 'services': [{'service': service3_1}, {'service': service3_2}], 'display_name': 'Fwaas-fake-fw-rule3'} rule4 = {'action': 'DROP', 'display_name': 'Fwaas-fake-fw-rule4'} if nsx_port_id: if is_ingress: field = 'destinations' direction = 'IN' else: field = 'sources' direction = 'OUT' new_val = [{'target_id': nsx_port_id, 'target_type': 'LogicalSwitch'}] for rule in (rule1, rule2, rule3, rule4): rule[field] = new_val rule['direction'] = direction if logged: for rule in (rule1, rule2, rule3, rule4): rule['logged'] = logged return [rule1, rule2, rule3, rule4] def _fake_empty_firewall_group(self): fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': True, 'tenant_id': 'tenant-uuid', 'ingress_rule_list': [], 'egress_rule_list': []} return fw_inst def _fake_firewall_group(self, rule_list, is_ingress=True, admin_state_up=True): _rule_list = copy.deepcopy(rule_list) for rule in _rule_list: rule['position'] = str(_rule_list.index(rule)) fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': admin_state_up, 'tenant_id': 'tenant-uuid', 'ingress_rule_list': [], 'egress_rule_list': []} if is_ingress: fw_inst['ingress_rule_list'] = _rule_list else: fw_inst['egress_rule_list'] = _rule_list return fw_inst def _fake_firewall_group_with_admin_down(self, rule_list, is_ingress=True): return self._fake_firewall_group( rule_list, is_ingress=is_ingress, admin_state_up=False) def _fake_apply_list(self): router_inst = {'id': FAKE_ROUTER_ID} router_info_inst = mock.Mock() router_info_inst.router = router_inst router_info_inst.router_id = FAKE_ROUTER_ID apply_list = [(router_info_inst, FAKE_PORT_ID)] return apply_list def test_create_firewall_no_rules(self): apply_list 
= self._fake_apply_list() firewall = self._fake_empty_firewall_group() port = {'id': FAKE_PORT_ID, 'network_id': FAKE_NET_ID} with mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[port]),\ mock.patch.object(self.plugin, 'get_port', return_value=port),\ mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=firewall),\ mock.patch("vmware_nsx.db.db.get_nsx_switch_and_port_id", return_value=(FAKE_NSX_LS_ID, 0)),\ mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." "update") as update_fw: self.firewall.create_firewall_group('nsx', apply_list, firewall) # expecting 2 block rules for the logical port (egress & ingress) # and last default allow all rule expected_rules = [ {'display_name': "Block port ingress", 'action': consts.FW_ACTION_DROP, 'destinations': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'IN'}, {'display_name': "Block port egress", 'action': consts.FW_ACTION_DROP, 'sources': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'OUT'}, self._default_rule() ] update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=expected_rules) def _setup_firewall_with_rules(self, func, is_ingress=True): apply_list = self._fake_apply_list() rule_list = self._fake_rules_v4(is_ingress=is_ingress) firewall = self._fake_firewall_group(rule_list, is_ingress=is_ingress) port = {'id': FAKE_PORT_ID, 'network_id': FAKE_NET_ID} with mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[port]),\ mock.patch.object(self.plugin, 'get_port', return_value=port),\ mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=firewall),\ mock.patch("vmware_nsx.db.db.get_nsx_switch_and_port_id", return_value=(FAKE_NSX_LS_ID, 0)),\ mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." 
"update") as update_fw: func('nsx', apply_list, firewall) expected_rules = self._fake_translated_rules( FAKE_NSX_LS_ID, is_ingress=is_ingress) + [ {'display_name': "Block port ingress", 'action': consts.FW_ACTION_DROP, 'destinations': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'IN'}, {'display_name': "Block port egress", 'action': consts.FW_ACTION_DROP, 'sources': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'OUT'}, self._default_rule() ] update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=expected_rules) def test_create_firewall_with_ingress_rules(self): self._setup_firewall_with_rules(self.firewall.create_firewall_group) def test_update_firewall_with_ingress_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group) def test_create_firewall_with_egress_rules(self): self._setup_firewall_with_rules(self.firewall.create_firewall_group, is_ingress=False) def test_update_firewall_with_egress_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group, is_ingress=False) def test_create_firewall_with_illegal_rules(self): """Use ingress rules as the egress list and verify failure""" apply_list = self._fake_apply_list() rule_list = self._fake_rules_v4(is_ingress=True) firewall = self._fake_firewall_group(rule_list, is_ingress=False) self.assertRaises(exceptions.FirewallInternalDriverError, self.firewall.create_firewall_group, 'nsx', apply_list, firewall) def test_delete_firewall(self): apply_list = self._fake_apply_list() firewall = self._fake_empty_firewall_group() port = {'id': FAKE_PORT_ID} with mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[port]),\ mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=None),\ mock.patch("vmware_nsx.db.db.get_nsx_switch_and_port_id", return_value=(FAKE_NSX_LS_ID, 0)),\ mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." 
"update") as update_fw: self.firewall.delete_firewall_group('nsx', apply_list, firewall) update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=[self._default_rule()]) def test_create_firewall_with_admin_down(self): apply_list = self._fake_apply_list() rule_list = self._fake_rules_v4() firewall = self._fake_firewall_group_with_admin_down(rule_list) with mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." "update") as update_fw: self.firewall.create_firewall_group('nsx', apply_list, firewall) update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=[self._default_rule()]) def test_create_firewall_with_dhcp_relay(self): apply_list = self._fake_apply_list() firewall = self._fake_empty_firewall_group() port = {'id': FAKE_PORT_ID, 'network_id': FAKE_NET_ID} relay_server = '1.1.1.1' with mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[port]),\ mock.patch.object(self.plugin, 'get_port', return_value=port),\ mock.patch.object(self.plugin, '_get_port_relay_servers', return_value=[relay_server]),\ mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=firewall),\ mock.patch("vmware_nsx.db.db.get_nsx_switch_and_port_id", return_value=(FAKE_NSX_LS_ID, 0)),\ mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." 
"update") as update_fw: self.firewall.create_firewall_group('nsx', apply_list, firewall) # expecting 2 allow rules for the relay servers, # 2 block rules for the logical port (egress & ingress) # and last default allow all rule expected_rules = [ {'display_name': "DHCP Relay ingress traffic", 'action': consts.FW_ACTION_ALLOW, 'destinations': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'sources': [{'target_id': relay_server, 'target_type': 'IPv4Address'}], 'services': self.plugin._get_port_relay_services(), 'direction': 'IN'}, {'display_name': "DHCP Relay egress traffic", 'action': consts.FW_ACTION_ALLOW, 'sources': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'destinations': [{'target_id': relay_server, 'target_type': 'IPv4Address'}], 'services': self.plugin._get_port_relay_services(), 'direction': 'OUT'}, {'display_name': "Block port ingress", 'action': consts.FW_ACTION_DROP, 'destinations': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'IN'}, {'display_name': "Block port egress", 'action': consts.FW_ACTION_DROP, 'sources': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'OUT'}, self._default_rule() ] update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=expected_rules) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v3/test_api_replay.py0000666000175100017510000000504313244523345025227 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from vmware_nsx.tests.unit.nsx_v3 import test_plugin from neutron_lib.api import attributes from neutron_lib.plugins import directory from oslo_config import cfg class TestApiReplay(test_plugin.NsxV3PluginTestCaseMixin): def setUp(self, plugin=None, ext_mgr=None, service_plugins=None): # enables api_replay_mode for these tests cfg.CONF.set_override('api_replay_mode', True) super(TestApiReplay, self).setUp() def tearDown(self): # disables api_replay_mode for these tests cfg.CONF.set_override('api_replay_mode', False) # remove the extension from the plugin directory.get_plugin().supported_extension_aliases.remove( 'api-replay') # Revert the attributes map back to normal for attr_name in ('ports', 'networks', 'security_groups', 'security_group_rules', 'routers', 'policies'): attr_info = attributes.RESOURCES[attr_name] attr_info['id']['allow_post'] = False super(TestApiReplay, self).tearDown() def test_create_port_specify_id(self): specified_network_id = '555e762b-d7a1-4b44-b09b-2a34ada56c9f' specified_port_id = 'e55e762b-d7a1-4b44-b09b-2a34ada56c9f' network_res = self._create_network(self.fmt, 'test-network', True, arg_list=('id',), id=specified_network_id) network = self.deserialize(self.fmt, network_res) self.assertEqual(specified_network_id, network['network']['id']) port_res = self._create_port(self.fmt, network['network']['id'], arg_list=('id',), id=specified_port_id) port = self.deserialize(self.fmt, port_res) self.assertEqual(specified_port_id, port['port']['id']) vmware-nsx-12.0.1/vmware_nsx/tests/unit/db/0000775000175100017510000000000013244524600020625 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/db/__init__.py0000666000175100017510000000000013244523345022733 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/db/test_migrations.py0000666000175100017510000000772113244523345024430 0ustar zuulzuul00000000000000# 
Copyright 2015 VMware, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.tests.functional.db import test_migrations from neutron.tests.unit import testlib_api from vmware_nsx.db.migration import alembic_migrations from vmware_nsx.db.migration.models import head #TODO(abhiraut): Remove this list from here once *aaS repos forms its # own list. # Add *aaS tables to EXTERNAL_TABLES since they should not be # tested. 
LBAAS_TABLES = { 'nsxv_edge_monitor_mappings', 'nsxv_edge_pool_mappings', 'nsxv_edge_vip_mappings', # LBaaS v2 tables 'lbaas_healthmonitors', 'lbaas_l7policies', 'lbaas_l7rules', 'lbaas_listeners', 'lbaas_loadbalancer_statistics', 'lbaas_loadbalanceragentbindings', 'lbaas_loadbalancers', 'lbaas_members', 'lbaas_pools', 'lbaas_sessionpersistences', 'lbaas_sni', } L2GW_TABLES = { 'l2gw_alembic_version', 'physical_locators', 'physical_switches', 'physical_ports', 'logical_switches', 'ucast_macs_locals', 'ucast_macs_remotes', 'vlan_bindings', 'l2gatewayconnections', 'l2gatewayinterfaces', 'l2gatewaydevices', 'l2gateways', 'pending_ucast_macs_remotes' } SFC_TABLES = { 'sfc_flow_classifier_l7_parameters', 'sfc_flow_classifiers', 'sfc_port_chain_parameters', 'sfc_service_function_params', 'sfc_port_pair_group_params', 'sfc_chain_classifier_associations', 'sfc_port_pairs', 'sfc_chain_group_associations', 'sfc_port_pair_groups', 'sfc_port_chains', 'sfc_uuid_intid_associations', 'sfc_path_port_associations', 'sfc_portpair_details', 'sfc_path_nodes', } TAAS_TABLES = { 'tap_services', 'tap_flows', 'tap_id_associations', } FWAAS_TABLES = { 'firewall_router_associations', 'cisco_firewall_associations', } # EXTERNAL_TABLES should contain all names of tables that are not related to # current repo. 
EXTERNAL_TABLES = (set(external.TABLES) | LBAAS_TABLES | L2GW_TABLES | SFC_TABLES | TAAS_TABLES | FWAAS_TABLES) class _TestModelsMigrationsFoo(test_migrations._TestModelsMigrations): def db_sync(self, engine): cfg.CONF.set_override('connection', engine.url, group='database') for conf in migration.get_alembic_configs(): self.alembic_config = conf self.alembic_config.neutron_config = cfg.CONF migration.do_alembic_command(conf, 'upgrade', 'heads') def get_metadata(self): return head.get_metadata() def include_object(self, object_, name, type_, reflected, compare_to): if type_ == 'table' and (name.startswith('alembic') or name == alembic_migrations.VERSION_TABLE or name in EXTERNAL_TABLES): return False if type_ == 'index' and reflected and name.startswith("idx_autoinc_"): return False return True class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin, _TestModelsMigrationsFoo, testlib_api.SqlTestCaseLight): pass class TestModelsMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin, _TestModelsMigrationsFoo, testlib_api.SqlTestCaseLight): pass vmware-nsx-12.0.1/vmware_nsx/tests/unit/test_utils.py0000666000175100017510000000372313244523345023025 0ustar zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg def override_nsx_ini_test(): cfg.CONF.set_override("default_tz_uuid", "fake_tz_uuid") cfg.CONF.set_override("nsx_controllers", ["fake1", "fake_2"]) cfg.CONF.set_override("nsx_user", "foo") cfg.CONF.set_override("nsx_password", "bar") cfg.CONF.set_override("default_l3_gw_service_uuid", "whatever") cfg.CONF.set_override("default_l2_gw_service_uuid", "whatever") cfg.CONF.set_override("manager_uri", "https://fake_manager", group="nsxv") cfg.CONF.set_override("user", "fake_user", group="nsxv") cfg.CONF.set_override("password", "fake_password", group="nsxv") cfg.CONF.set_override("vdn_scope_id", "fake_vdn_scope_id", group="nsxv") cfg.CONF.set_override("dvs_id", "fake_dvs_id", group="nsxv") def override_nsx_ini_full_test(): cfg.CONF.set_override("default_tz_uuid", "fake_tz_uuid") cfg.CONF.set_override("nsx_controllers", ["fake1", "fake_2"]) cfg.CONF.set_override("nsx_user", "foo") cfg.CONF.set_override("nsx_password", "bar") cfg.CONF.set_override("default_l3_gw_service_uuid", "whatever") cfg.CONF.set_override("default_l2_gw_service_uuid", "whatever") cfg.CONF.set_override("nsx_default_interface_name", "whatever") cfg.CONF.set_override("http_timeout", 13) cfg.CONF.set_override("redirects", 12) cfg.CONF.set_override("retries", "11") vmware-nsx-12.0.1/vmware_nsx/tests/unit/__init__.py0000666000175100017510000000456213244523345022367 0ustar zuulzuul00000000000000# Copyright 2013 OpenStack Foundation. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os import time import eventlet import mock from vmware_nsx.api_client import client as nsx_client from vmware_nsx.api_client import eventlet_client from vmware_nsx import extensions import vmware_nsx.plugin as neutron_plugin from vmware_nsx.plugins.nsx_v.vshield.common import ( VcnsApiClient as vcnsapi) from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.plugins.nsx_v.vshield import vcns import vmware_nsx.plugins.nsx_v.vshield.vcns_driver as vcnsdriver plugin = neutron_plugin.NsxPlugin api_client = nsx_client.NsxApiClient evt_client = eventlet_client.EventletApiClient vcns_class = vcns.Vcns vcns_driver = vcnsdriver.VcnsDriver vcns_api_helper = vcnsapi.VcnsApiHelper edge_manage_class = edge_utils.EdgeManager STUBS_PATH = os.path.join(os.path.dirname(__file__), 'etc') NSXEXT_PATH = os.path.dirname(extensions.__file__) NSXAPI_NAME = '%s.%s' % (api_client.__module__, api_client.__name__) PLUGIN_NAME = '%s.%s' % (plugin.__module__, plugin.__name__) CLIENT_NAME = '%s.%s' % (evt_client.__module__, evt_client.__name__) VCNS_NAME = '%s.%s' % (vcns_class.__module__, vcns_class.__name__) VCNS_DRIVER_NAME = '%s.%s' % (vcns_driver.__module__, vcns_driver.__name__) VCNSAPI_NAME = '%s.%s' % (vcns_api_helper.__module__, vcns_api_helper.__name__) EDGE_MANAGE_NAME = '%s.%s' % (edge_manage_class.__module__, edge_manage_class.__name__) # Mock for the tenacity retrying sleeping method eventlet.monkey_patch() mocked_retry_sleep = mock.patch.object(time, 'sleep') mocked_retry_sleep.start() def get_fake_conf(filename): return os.path.join(STUBS_PATH, filename) def nsx_method(method_name, module_name='nsxlib'): return '%s.%s.%s' % ('vmware_nsx', module_name, method_name) vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/0000775000175100017510000000000013244524600022437 5ustar 
zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_networkgw.py0000666000175100017510000015134213244523345026114 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock from neutron.api import extensions from neutron.db import api as db_api from neutron.db import db_base_plugin_v2 from neutron import quota from neutron.tests import base from neutron.tests.unit.api import test_extensions from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin from neutron_lib import context from neutron_lib.plugins import constants from neutron_lib.plugins import directory from oslo_config import cfg from webob import exc import webtest from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import networkgw_db from vmware_nsx.db import nsx_models from vmware_nsx.extensions import networkgw from vmware_nsx.nsxlib import mh as nsxlib from vmware_nsx.nsxlib.mh import l2gateway as l2gwlib from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_mh import test_plugin as test_nsx_plugin _uuid = test_base._uuid _get_path = test_base._get_path class TestExtensionManager(object): def get_resources(self): return networkgw.Networkgw.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class 
NetworkGatewayExtensionTestCase(base.BaseTestCase): def setUp(self): super(NetworkGatewayExtensionTestCase, self).setUp() plugin = '%s.%s' % (networkgw.__name__, networkgw.NetworkGatewayPluginBase.__name__) self._gw_resource = networkgw.GATEWAY_RESOURCE_NAME self._dev_resource = networkgw.DEVICE_RESOURCE_NAME # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None # Create the default configurations self.config_parse() # Update the plugin and extensions path self.setup_coreplugin(plugin) _plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = _plugin_patcher.start() # Instantiate mock plugin and enable extensions self.plugin.return_value.supported_extension_aliases = ( [networkgw.EXT_ALIAS]) directory.add_plugin(constants.CORE, self.plugin.return_value) ext_mgr = TestExtensionManager() extensions.PluginAwareExtensionManager._instance = ext_mgr self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr) self.api = webtest.TestApp(self.ext_mdw) quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') def test_network_gateway_create(self): nw_gw_id = _uuid() tenant_id = _uuid() data = {self._gw_resource: {'name': 'nw-gw', 'tenant_id': tenant_id, 'project_id': tenant_id, 'devices': [{'id': _uuid(), 'interface_name': 'xxx'}]}} return_value = data[self._gw_resource].copy() return_value.update({'id': nw_gw_id}) instance = self.plugin.return_value instance.create_network_gateway.return_value = return_value res = self.api.post_json(_get_path(networkgw.NETWORK_GATEWAYS), data) instance.create_network_gateway.assert_called_with( mock.ANY, network_gateway=data) self.assertEqual(res.status_int, exc.HTTPCreated.code) self.assertIn(self._gw_resource, res.json) nw_gw = res.json[self._gw_resource] self.assertEqual(nw_gw['id'], nw_gw_id) def _test_network_gateway_create_with_error( self, data, error_code=exc.HTTPBadRequest.code): res = 
self.api.post_json(_get_path(networkgw.NETWORK_GATEWAYS), data, expect_errors=True) self.assertEqual(res.status_int, error_code) def test_network_gateway_create_invalid_device_spec(self): data = {self._gw_resource: {'name': 'nw-gw', 'tenant_id': _uuid(), 'devices': [{'id': _uuid(), 'invalid': 'xxx'}]}} self._test_network_gateway_create_with_error(data) def test_network_gateway_create_extra_attr_in_device_spec(self): data = {self._gw_resource: {'name': 'nw-gw', 'tenant_id': _uuid(), 'devices': [{'id': _uuid(), 'interface_name': 'xxx', 'extra_attr': 'onetoomany'}]}} self._test_network_gateway_create_with_error(data) def test_network_gateway_update(self): nw_gw_name = 'updated' data = {self._gw_resource: {'name': nw_gw_name}} nw_gw_id = _uuid() return_value = {'id': nw_gw_id, 'name': nw_gw_name} instance = self.plugin.return_value instance.update_network_gateway.return_value = return_value res = self.api.put_json( _get_path('%s/%s' % (networkgw.NETWORK_GATEWAYS, nw_gw_id)), data) instance.update_network_gateway.assert_called_with( mock.ANY, nw_gw_id, network_gateway=data) self.assertEqual(res.status_int, exc.HTTPOk.code) self.assertIn(self._gw_resource, res.json) nw_gw = res.json[self._gw_resource] self.assertEqual(nw_gw['id'], nw_gw_id) self.assertEqual(nw_gw['name'], nw_gw_name) def test_network_gateway_delete(self): nw_gw_id = _uuid() instance = self.plugin.return_value res = self.api.delete(_get_path('%s/%s' % (networkgw.NETWORK_GATEWAYS, nw_gw_id))) instance.delete_network_gateway.assert_called_with(mock.ANY, nw_gw_id) self.assertEqual(res.status_int, exc.HTTPNoContent.code) def test_network_gateway_get(self): nw_gw_id = _uuid() return_value = {self._gw_resource: {'name': 'test', 'devices': [{'id': _uuid(), 'interface_name': 'xxx'}], 'id': nw_gw_id}} instance = self.plugin.return_value instance.get_network_gateway.return_value = return_value res = self.api.get(_get_path('%s/%s' % (networkgw.NETWORK_GATEWAYS, nw_gw_id))) 
instance.get_network_gateway.assert_called_with(mock.ANY, nw_gw_id, fields=mock.ANY) self.assertEqual(res.status_int, exc.HTTPOk.code) def test_network_gateway_list(self): nw_gw_id = _uuid() return_value = [{self._gw_resource: {'name': 'test', 'devices': [{'id': _uuid(), 'interface_name': 'xxx'}], 'id': nw_gw_id}}] instance = self.plugin.return_value instance.get_network_gateways.return_value = return_value res = self.api.get(_get_path(networkgw.NETWORK_GATEWAYS)) instance.get_network_gateways.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) self.assertEqual(res.status_int, exc.HTTPOk.code) def test_network_gateway_connect(self): nw_gw_id = _uuid() nw_id = _uuid() gw_port_id = _uuid() mapping_data = {'network_id': nw_id, 'segmentation_type': 'vlan', 'segmentation_id': '999'} return_value = {'connection_info': { 'network_gateway_id': nw_gw_id, 'port_id': gw_port_id, 'network_id': nw_id}} instance = self.plugin.return_value instance.connect_network.return_value = return_value res = self.api.put_json(_get_path('%s/%s/connect_network' % (networkgw.NETWORK_GATEWAYS, nw_gw_id)), mapping_data) instance.connect_network.assert_called_with(mock.ANY, nw_gw_id, mapping_data) self.assertEqual(res.status_int, exc.HTTPOk.code) nw_conn_res = res.json['connection_info'] self.assertEqual(nw_conn_res['port_id'], gw_port_id) self.assertEqual(nw_conn_res['network_id'], nw_id) def test_network_gateway_disconnect(self): nw_gw_id = _uuid() nw_id = _uuid() mapping_data = {'network_id': nw_id} instance = self.plugin.return_value res = self.api.put_json(_get_path('%s/%s/disconnect_network' % (networkgw.NETWORK_GATEWAYS, nw_gw_id)), mapping_data) instance.disconnect_network.assert_called_with(mock.ANY, nw_gw_id, mapping_data) self.assertEqual(res.status_int, exc.HTTPOk.code) def test_gateway_device_get(self): gw_dev_id = _uuid() return_value = {self._dev_resource: {'name': 'test', 'connector_type': 'stt', 'connector_ip': '1.1.1.1', 'id': gw_dev_id}} instance = 
self.plugin.return_value instance.get_gateway_device.return_value = return_value res = self.api.get(_get_path('%s/%s' % (networkgw.GATEWAY_DEVICES, gw_dev_id))) instance.get_gateway_device.assert_called_with(mock.ANY, gw_dev_id, fields=mock.ANY) self.assertEqual(res.status_int, exc.HTTPOk.code) def test_gateway_device_list(self): gw_dev_id = _uuid() return_value = [{self._dev_resource: {'name': 'test', 'connector_type': 'stt', 'connector_ip': '1.1.1.1', 'id': gw_dev_id}}] instance = self.plugin.return_value instance.get_gateway_devices.return_value = return_value res = self.api.get(_get_path(networkgw.GATEWAY_DEVICES)) instance.get_gateway_devices.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) self.assertEqual(res.status_int, exc.HTTPOk.code) def test_gateway_device_create(self): gw_dev_id = _uuid() tenant_id = _uuid() data = {self._dev_resource: {'name': 'test-dev', 'tenant_id': tenant_id, 'project_id': tenant_id, 'client_certificate': 'xyz', 'connector_type': 'stt', 'connector_ip': '1.1.1.1'}} return_value = data[self._dev_resource].copy() return_value.update({'id': gw_dev_id}) instance = self.plugin.return_value instance.create_gateway_device.return_value = return_value res = self.api.post_json(_get_path(networkgw.GATEWAY_DEVICES), data) instance.create_gateway_device.assert_called_with( mock.ANY, gateway_device=data) self.assertEqual(res.status_int, exc.HTTPCreated.code) self.assertIn(self._dev_resource, res.json) gw_dev = res.json[self._dev_resource] self.assertEqual(gw_dev['id'], gw_dev_id) def _test_gateway_device_create_with_error( self, data, error_code=exc.HTTPBadRequest.code): res = self.api.post_json(_get_path(networkgw.GATEWAY_DEVICES), data, expect_errors=True) self.assertEqual(res.status_int, error_code) def test_gateway_device_create_invalid_connector_type(self): data = {self._gw_resource: {'name': 'test-dev', 'client_certificate': 'xyz', 'tenant_id': _uuid(), 'connector_type': 'invalid', 'connector_ip': '1.1.1.1'}} 
self._test_gateway_device_create_with_error(data) def test_gateway_device_create_invalid_connector_ip(self): data = {self._gw_resource: {'name': 'test-dev', 'client_certificate': 'xyz', 'tenant_id': _uuid(), 'connector_type': 'stt', 'connector_ip': 'invalid'}} self._test_gateway_device_create_with_error(data) def test_gateway_device_create_extra_attr_in_device_spec(self): data = {self._gw_resource: {'name': 'test-dev', 'client_certificate': 'xyz', 'tenant_id': _uuid(), 'alien_attribute': 'E.T.', 'connector_type': 'stt', 'connector_ip': '1.1.1.1'}} self._test_gateway_device_create_with_error(data) def test_gateway_device_update(self): gw_dev_name = 'updated' data = {self._dev_resource: {'name': gw_dev_name}} gw_dev_id = _uuid() return_value = {'id': gw_dev_id, 'name': gw_dev_name} instance = self.plugin.return_value instance.update_gateway_device.return_value = return_value res = self.api.put_json( _get_path('%s/%s' % (networkgw.GATEWAY_DEVICES, gw_dev_id)), data) instance.update_gateway_device.assert_called_with( mock.ANY, gw_dev_id, gateway_device=data) self.assertEqual(res.status_int, exc.HTTPOk.code) self.assertIn(self._dev_resource, res.json) gw_dev = res.json[self._dev_resource] self.assertEqual(gw_dev['id'], gw_dev_id) self.assertEqual(gw_dev['name'], gw_dev_name) def test_gateway_device_delete(self): gw_dev_id = _uuid() instance = self.plugin.return_value res = self.api.delete(_get_path('%s/%s' % (networkgw.GATEWAY_DEVICES, gw_dev_id))) instance.delete_gateway_device.assert_called_with(mock.ANY, gw_dev_id) self.assertEqual(res.status_int, exc.HTTPNoContent.code) class NetworkGatewayDbTestCase(test_db_plugin.NeutronDbPluginV2TestCase): """Unit tests for Network Gateway DB support.""" def setUp(self, plugin=None, ext_mgr=None): if not plugin: plugin = '%s.%s' % (__name__, TestNetworkGatewayPlugin.__name__) if not ext_mgr: ext_mgr = TestExtensionManager() self.gw_resource = networkgw.GATEWAY_RESOURCE_NAME self.dev_resource = networkgw.DEVICE_RESOURCE_NAME 
super(NetworkGatewayDbTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def _create_network_gateway(self, fmt, tenant_id, name=None, devices=None, arg_list=None, **kwargs): data = {self.gw_resource: {'tenant_id': tenant_id, 'devices': devices}} if name: data[self.gw_resource]['name'] = name for arg in arg_list or (): # Arg must be present and not empty if arg in kwargs and kwargs[arg]: data[self.gw_resource][arg] = kwargs[arg] nw_gw_req = self.new_create_request(networkgw.NETWORK_GATEWAYS, data, fmt) if (kwargs.get('set_context') and tenant_id): # create a specific auth context for this request nw_gw_req.environ['neutron.context'] = context.Context( '', tenant_id) return nw_gw_req.get_response(self.ext_api) @contextlib.contextmanager def _network_gateway(self, name='gw1', devices=None, fmt='json', tenant_id=_uuid()): device = None if not devices: device_res = self._create_gateway_device( fmt, tenant_id, 'stt', '1.1.1.1', 'xxxxxx', name='whatever') if device_res.status_int >= 400: raise exc.HTTPClientError(code=device_res.status_int) device = self.deserialize(fmt, device_res) devices = [{'id': device[self.dev_resource]['id'], 'interface_name': 'xyz'}] res = self._create_network_gateway(fmt, tenant_id, name=name, devices=devices) if res.status_int >= 400: raise exc.HTTPClientError(code=res.status_int) network_gateway = self.deserialize(fmt, res) yield network_gateway self._delete(networkgw.NETWORK_GATEWAYS, network_gateway[self.gw_resource]['id']) if device: self._delete(networkgw.GATEWAY_DEVICES, device[self.dev_resource]['id']) def _create_gateway_device(self, fmt, tenant_id, connector_type, connector_ip, client_certificate, name=None, set_context=False): data = {self.dev_resource: {'tenant_id': tenant_id, 'connector_type': connector_type, 'connector_ip': connector_ip, 'client_certificate': client_certificate}} if name: data[self.dev_resource]['name'] = name gw_dev_req = self.new_create_request(networkgw.GATEWAY_DEVICES, data, fmt) if (set_context and 
tenant_id): # create a specific auth context for this request gw_dev_req.environ['neutron.context'] = context.Context( '', tenant_id) return gw_dev_req.get_response(self.ext_api) def _update_gateway_device(self, fmt, gateway_device_id, connector_type=None, connector_ip=None, client_certificate=None, name=None, set_context=False, tenant_id=None): data = {self.dev_resource: {}} if connector_type: data[self.dev_resource]['connector_type'] = connector_type if connector_ip: data[self.dev_resource]['connector_ip'] = connector_ip if client_certificate: data[self.dev_resource]['client_certificate'] = client_certificate if name: data[self.dev_resource]['name'] = name gw_dev_req = self.new_update_request(networkgw.GATEWAY_DEVICES, data, gateway_device_id, fmt) if (set_context and tenant_id): # create a specific auth context for this request gw_dev_req.environ['neutron.context'] = context.Context( '', tenant_id) return gw_dev_req.get_response(self.ext_api) @contextlib.contextmanager def _gateway_device(self, name='gw_dev', connector_type='stt', connector_ip='1.1.1.1', client_certificate='xxxxxxxxxxxxxxx', fmt='json', tenant_id=_uuid()): res = self._create_gateway_device( fmt, tenant_id, connector_type=connector_type, connector_ip=connector_ip, client_certificate=client_certificate, name=name) if res.status_int >= 400: raise exc.HTTPClientError(code=res.status_int) gateway_device = self.deserialize(fmt, res) yield gateway_device self._delete(networkgw.GATEWAY_DEVICES, gateway_device[self.dev_resource]['id']) def _gateway_action(self, action, network_gateway_id, network_id, segmentation_type, segmentation_id=None, expected_status=exc.HTTPOk.code): connection_data = {'network_id': network_id, 'segmentation_type': segmentation_type} if segmentation_id: connection_data['segmentation_id'] = segmentation_id req = self.new_action_request(networkgw.NETWORK_GATEWAYS, connection_data, network_gateway_id, "%s_network" % action) res = req.get_response(self.ext_api) 
self.assertEqual(res.status_int, expected_status) return self.deserialize('json', res) def _test_connect_and_disconnect_network(self, segmentation_type, segmentation_id=None): with self._network_gateway() as gw: with self.network() as net: body = self._gateway_action('connect', gw[self.gw_resource]['id'], net['network']['id'], segmentation_type, segmentation_id) self.assertIn('connection_info', body) connection_info = body['connection_info'] for attr in ('network_id', 'port_id', 'network_gateway_id'): self.assertIn(attr, connection_info) # fetch port and confirm device_id gw_port_id = connection_info['port_id'] port_body = self._show('ports', gw_port_id) self.assertEqual(port_body['port']['device_id'], gw[self.gw_resource]['id']) # Clean up - otherwise delete will fail body = self._gateway_action('disconnect', gw[self.gw_resource]['id'], net['network']['id'], segmentation_type, segmentation_id) # Check associated port has been deleted too body = self._show('ports', gw_port_id, expected_code=exc.HTTPNotFound.code) def test_create_network_gateway(self): tenant_id = _uuid() _gateway_device = (lambda name: self._gateway_device(name=name, tenant_id=tenant_id)) with _gateway_device('dev_1') as dev_1: with _gateway_device('dev_2') as dev_2: name = 'test-gw' dev_1_id = dev_1[self.dev_resource]['id'] dev_2_id = dev_2[self.dev_resource]['id'] devices = [{'id': dev_1_id, 'interface_name': 'xxx'}, {'id': dev_2_id, 'interface_name': 'yyy'}] keys = [('devices', devices), ('name', name)] with self._network_gateway(name=name, devices=devices, tenant_id=tenant_id) as gw: for k, v in keys: self.assertEqual(gw[self.gw_resource][k], v) def test_create_network_gateway_no_interface_name(self): tenant_id = _uuid() with self._gateway_device(tenant_id=tenant_id) as dev: name = 'test-gw' devices = [{'id': dev[self.dev_resource]['id']}] exp_devices = devices exp_devices[0]['interface_name'] = 'breth0' keys = [('devices', exp_devices), ('name', name)] with self._network_gateway(name=name, 
devices=devices, tenant_id=tenant_id) as gw: for k, v in keys: self.assertEqual(gw[self.gw_resource][k], v) def test_create_network_gateway_not_owned_device_raises_404(self): # Create a device with a different tenant identifier with self._gateway_device(name='dev', tenant_id=_uuid()) as dev: name = 'test-gw' dev_id = dev[self.dev_resource]['id'] devices = [{'id': dev_id, 'interface_name': 'xxx'}] res = self._create_network_gateway( 'json', _uuid(), name=name, devices=devices) self.assertEqual(404, res.status_int) def test_create_network_gateway_non_existent_device_raises_404(self): name = 'test-gw' devices = [{'id': _uuid(), 'interface_name': 'xxx'}] res = self._create_network_gateway( 'json', _uuid(), name=name, devices=devices) self.assertEqual(404, res.status_int) def test_delete_network_gateway(self): tenant_id = _uuid() with self._gateway_device(tenant_id=tenant_id) as dev: name = 'test-gw' device_id = dev[self.dev_resource]['id'] devices = [{'id': device_id, 'interface_name': 'xxx'}] with self._network_gateway(name=name, devices=devices, tenant_id=tenant_id) as gw: # Nothing to do here - just let the gateway go gw_id = gw[self.gw_resource]['id'] # Verify nothing left on db session = db_api.get_reader_session() dev_query = session.query( nsx_models.NetworkGatewayDevice).filter( nsx_models.NetworkGatewayDevice.id == device_id) self.assertIsNone(dev_query.first()) gw_query = session.query(nsx_models.NetworkGateway).filter( nsx_models.NetworkGateway.id == gw_id) self.assertIsNone(gw_query.first()) def test_update_network_gateway(self): with self._network_gateway() as gw: data = {self.gw_resource: {'name': 'new_name'}} req = self.new_update_request(networkgw.NETWORK_GATEWAYS, data, gw[self.gw_resource]['id']) res = self.deserialize('json', req.get_response(self.ext_api)) self.assertEqual(res[self.gw_resource]['name'], data[self.gw_resource]['name']) def test_get_network_gateway(self): with self._network_gateway(name='test-gw') as gw: req = 
self.new_show_request(networkgw.NETWORK_GATEWAYS, gw[self.gw_resource]['id']) res = self.deserialize('json', req.get_response(self.ext_api)) self.assertEqual(res[self.gw_resource]['name'], gw[self.gw_resource]['name']) def test_list_network_gateways(self): with self._network_gateway(name='test-gw-1') as gw1: with self._network_gateway(name='test_gw_2') as gw2: req = self.new_list_request(networkgw.NETWORK_GATEWAYS) res = self.deserialize('json', req.get_response(self.ext_api)) key = self.gw_resource + 's' self.assertEqual(len(res[key]), 2) self.assertEqual(res[key][0]['name'], gw1[self.gw_resource]['name']) self.assertEqual(res[key][1]['name'], gw2[self.gw_resource]['name']) def _test_list_network_gateway_with_multiple_connections( self, expected_gateways=1): with self._network_gateway() as gw: with self.network() as net_1: self._gateway_action('connect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) self._gateway_action('connect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 777) req = self.new_list_request(networkgw.NETWORK_GATEWAYS) res = self.deserialize('json', req.get_response(self.ext_api)) key = self.gw_resource + 's' self.assertEqual(len(res[key]), expected_gateways) for item in res[key]: self.assertIn('ports', item) if item['id'] == gw[self.gw_resource]['id']: gw_ports = item['ports'] self.assertEqual(len(gw_ports), 2) segmentation_ids = [555, 777] for gw_port in gw_ports: self.assertEqual('vlan', gw_port['segmentation_type']) self.assertIn(gw_port['segmentation_id'], segmentation_ids) segmentation_ids.remove(gw_port['segmentation_id']) # Required cleanup self._gateway_action('disconnect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) self._gateway_action('disconnect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 777) def test_list_network_gateway_with_multiple_connections(self): self._test_list_network_gateway_with_multiple_connections() def test_connect_and_disconnect_network(self): 
self._test_connect_and_disconnect_network('flat') def test_connect_and_disconnect_network_no_seg_type(self): self._test_connect_and_disconnect_network(None) def test_connect_and_disconnect_network_vlan_with_segmentation_id(self): self._test_connect_and_disconnect_network('vlan', 999) def test_connect_and_disconnect_network_vlan_without_segmentation_id(self): self._test_connect_and_disconnect_network('vlan') def test_connect_network_multiple_times(self): with self._network_gateway() as gw: with self.network() as net_1: self._gateway_action('connect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) self._gateway_action('connect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 777) self._gateway_action('disconnect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) self._gateway_action('disconnect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 777) def test_connect_network_multiple_gateways(self): with self._network_gateway() as gw_1: with self._network_gateway() as gw_2: with self.network() as net_1: self._gateway_action('connect', gw_1[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) self._gateway_action('connect', gw_2[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) self._gateway_action('disconnect', gw_1[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) self._gateway_action('disconnect', gw_2[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) def test_connect_network_mapping_in_use_returns_409(self): with self._network_gateway() as gw: with self.network() as net_1: self._gateway_action('connect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) with self.network() as net_2: self._gateway_action('connect', gw[self.gw_resource]['id'], net_2['network']['id'], 'vlan', 555, expected_status=exc.HTTPConflict.code) # Clean up - otherwise delete will fail self._gateway_action('disconnect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 
555) def test_connect_network_vlan_invalid_seg_id_returns_400(self): with self._network_gateway() as gw: with self.network() as net: # above upper bound self._gateway_action('connect', gw[self.gw_resource]['id'], net['network']['id'], 'vlan', 4095, expected_status=exc.HTTPBadRequest.code) # below lower bound (0 is valid for NSX plugin) self._gateway_action('connect', gw[self.gw_resource]['id'], net['network']['id'], 'vlan', -1, expected_status=exc.HTTPBadRequest.code) def test_connect_invalid_network_returns_400(self): with self._network_gateway() as gw: self._gateway_action('connect', gw[self.gw_resource]['id'], 'hohoho', 'vlan', 555, expected_status=exc.HTTPBadRequest.code) def test_connect_unspecified_network_returns_400(self): with self._network_gateway() as gw: self._gateway_action('connect', gw[self.gw_resource]['id'], None, 'vlan', 555, expected_status=exc.HTTPBadRequest.code) def test_disconnect_network_ambiguous_returns_409(self): with self._network_gateway() as gw: with self.network() as net_1: self._gateway_action('connect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) self._gateway_action('connect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 777) # This should raise self._gateway_action('disconnect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', expected_status=exc.HTTPConflict.code) self._gateway_action('disconnect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) self._gateway_action('disconnect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 777) def test_delete_active_gateway_port_returns_409(self): with self._network_gateway() as gw: with self.network() as net_1: body = self._gateway_action('connect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) # fetch port id and try to delete it gw_port_id = body['connection_info']['port_id'] self._delete('ports', gw_port_id, expected_code=exc.HTTPConflict.code) body = self._gateway_action('disconnect', 
gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) def test_delete_network_gateway_active_connections_returns_409(self): with self._network_gateway() as gw: with self.network() as net_1: self._gateway_action('connect', gw[self.gw_resource]['id'], net_1['network']['id'], 'flat') self._delete(networkgw.NETWORK_GATEWAYS, gw[self.gw_resource]['id'], expected_code=exc.HTTPConflict.code) self._gateway_action('disconnect', gw[self.gw_resource]['id'], net_1['network']['id'], 'flat') def test_disconnect_non_existing_connection_returns_404(self): with self._network_gateway() as gw: with self.network() as net_1: self._gateway_action('connect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) self._gateway_action('disconnect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 999, expected_status=exc.HTTPNotFound.code) self._gateway_action('disconnect', gw[self.gw_resource]['id'], net_1['network']['id'], 'vlan', 555) def test_create_gateway_device( self, expected_status=networkgw_db.STATUS_UNKNOWN): with self._gateway_device(name='test-dev', connector_type='stt', connector_ip='1.1.1.1', client_certificate='xyz') as dev: self.assertEqual(dev[self.dev_resource]['name'], 'test-dev') self.assertEqual(dev[self.dev_resource]['connector_type'], 'stt') self.assertEqual(dev[self.dev_resource]['connector_ip'], '1.1.1.1') self.assertEqual(dev[self.dev_resource]['status'], expected_status) def test_list_gateway_devices(self): gateway_device = (lambda name, connector_ip, client_cert: self._gateway_device(name=name, connector_type='stt', connector_ip=connector_ip, client_certificate=client_cert)) with gateway_device('test-dev-1', '1.1.1.1', 'xyz') as dev_1,\ gateway_device('test-dev-2', '2.2.2.2', 'qwe') as dev_2: req = self.new_list_request(networkgw.GATEWAY_DEVICES) res = self.deserialize('json', req.get_response(self.ext_api)) devices = res[networkgw.GATEWAY_DEVICES.replace('-', '_')] self.assertEqual(len(devices), 2) dev_1 = devices[0] dev_2 = 
devices[1] self.assertEqual(dev_1['name'], 'test-dev-1') self.assertEqual(dev_2['name'], 'test-dev-2') def test_get_gateway_device( self, expected_status=networkgw_db.STATUS_UNKNOWN): with self._gateway_device(name='test-dev', connector_type='stt', connector_ip='1.1.1.1', client_certificate='xyz') as dev: req = self.new_show_request(networkgw.GATEWAY_DEVICES, dev[self.dev_resource]['id']) res = self.deserialize('json', req.get_response(self.ext_api)) self.assertEqual(res[self.dev_resource]['name'], 'test-dev') self.assertEqual(res[self.dev_resource]['connector_type'], 'stt') self.assertEqual(res[self.dev_resource]['connector_ip'], '1.1.1.1') self.assertEqual(res[self.dev_resource]['status'], expected_status) def test_update_gateway_device( self, expected_status=networkgw_db.STATUS_UNKNOWN): with self._gateway_device(name='test-dev', connector_type='stt', connector_ip='1.1.1.1', client_certificate='xyz') as dev: self._update_gateway_device('json', dev[self.dev_resource]['id'], connector_type='stt', connector_ip='2.2.2.2', name='test-dev-upd') req = self.new_show_request(networkgw.GATEWAY_DEVICES, dev[self.dev_resource]['id']) res = self.deserialize('json', req.get_response(self.ext_api)) self.assertEqual(res[self.dev_resource]['name'], 'test-dev-upd') self.assertEqual(res[self.dev_resource]['connector_type'], 'stt') self.assertEqual(res[self.dev_resource]['connector_ip'], '2.2.2.2') self.assertEqual(res[self.dev_resource]['status'], expected_status) def test_delete_gateway_device(self): with self._gateway_device(name='test-dev', connector_type='stt', connector_ip='1.1.1.1', client_certificate='xyz') as dev: # Nothing to do here - just note the device id dev_id = dev[self.dev_resource]['id'] # Verify nothing left on db session = db_api.get_reader_session() dev_query = session.query(nsx_models.NetworkGatewayDevice) dev_query.filter(nsx_models.NetworkGatewayDevice.id == dev_id) self.assertIsNone(dev_query.first()) class 
TestNetworkGateway(test_nsx_plugin.NsxPluginV2TestCase, NetworkGatewayDbTestCase): def setUp(self, plugin=vmware.PLUGIN_NAME, ext_mgr=None): cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) # Mock l2gwlib calls for gateway devices since this resource is not # mocked through the fake NSX API client create_gw_dev_patcher = mock.patch.object( l2gwlib, 'create_gateway_device') update_gw_dev_patcher = mock.patch.object( l2gwlib, 'update_gateway_device') delete_gw_dev_patcher = mock.patch.object( l2gwlib, 'delete_gateway_device') get_gw_dev_status_patcher = mock.patch.object( l2gwlib, 'get_gateway_device_status') get_gw_dev_statuses_patcher = mock.patch.object( l2gwlib, 'get_gateway_devices_status') self.mock_create_gw_dev = create_gw_dev_patcher.start() self.mock_create_gw_dev.return_value = {'uuid': 'callejon'} self.mock_update_gw_dev = update_gw_dev_patcher.start() delete_gw_dev_patcher.start() self.mock_get_gw_dev_status = get_gw_dev_status_patcher.start() get_gw_dev_statuses = get_gw_dev_statuses_patcher.start() get_gw_dev_statuses.return_value = {} super(TestNetworkGateway, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def test_create_network_gateway_name_exceeds_40_chars(self): name = 'this_is_a_gateway_whose_name_is_longer_than_40_chars' with self._network_gateway(name=name) as nw_gw: # Assert Neutron name is not truncated self.assertEqual(nw_gw[self.gw_resource]['name'], name) def test_update_network_gateway_with_name_calls_backend(self): with mock.patch.object( nsxlib.l2gateway, 'update_l2_gw_service') as mock_update_gw: with self._network_gateway(name='cavani') as nw_gw: nw_gw_id = nw_gw[self.gw_resource]['id'] self._update(networkgw.NETWORK_GATEWAYS, nw_gw_id, {self.gw_resource: {'name': 'higuain'}}) mock_update_gw.assert_called_once_with( mock.ANY, nw_gw_id, 'higuain') def test_update_network_gateway_without_name_does_not_call_backend(self): with mock.patch.object( nsxlib.l2gateway, 'update_l2_gw_service') as mock_update_gw: with 
self._network_gateway(name='something') as nw_gw: nw_gw_id = nw_gw[self.gw_resource]['id'] self._update(networkgw.NETWORK_GATEWAYS, nw_gw_id, {self.gw_resource: {}}) self.assertEqual(mock_update_gw.call_count, 0) def test_update_network_gateway_name_exceeds_40_chars(self): new_name = 'this_is_a_gateway_whose_name_is_longer_than_40_chars' with self._network_gateway(name='something') as nw_gw: nw_gw_id = nw_gw[self.gw_resource]['id'] self._update(networkgw.NETWORK_GATEWAYS, nw_gw_id, {self.gw_resource: {'name': new_name}}) req = self.new_show_request(networkgw.NETWORK_GATEWAYS, nw_gw_id) res = self.deserialize('json', req.get_response(self.ext_api)) # Assert Neutron name is not truncated self.assertEqual(new_name, res[self.gw_resource]['name']) # Assert NSX name is truncated self.assertEqual( new_name[:40], self.fc._fake_gatewayservice_dict[nw_gw_id]['display_name']) def test_create_network_gateway_nsx_error_returns_500(self): def raise_nsx_api_exc(*args, **kwargs): raise api_exc.NsxApiException() with mock.patch.object(nsxlib.l2gateway, 'create_l2_gw_service', new=raise_nsx_api_exc): tenant_id = _uuid() with self._gateway_device(tenant_id=tenant_id) as dev: res = self._create_network_gateway( self.fmt, tenant_id, name='yyy', devices=[{'id': dev[self.dev_resource]['id']}]) self.assertEqual(500, res.status_int) def test_create_network_gateway_nsx_error_returns_409(self): with mock.patch.object(nsxlib.l2gateway, 'create_l2_gw_service', side_effect=api_exc.Conflict): tenant_id = _uuid() with self._gateway_device(tenant_id=tenant_id) as dev: res = self._create_network_gateway( self.fmt, tenant_id, name='yyy', devices=[{'id': dev[self.dev_resource]['id']}]) self.assertEqual(409, res.status_int) def test_list_network_gateways(self): with self._network_gateway(name='test-gw-1') as gw1: with self._network_gateway(name='test_gw_2') as gw2: req = self.new_list_request(networkgw.NETWORK_GATEWAYS) res = self.deserialize('json', req.get_response(self.ext_api)) # Ensure we always 
get the list in the same order gateways = sorted( res[self.gw_resource + 's'], key=lambda k: k['name']) self.assertEqual(len(gateways), 3) # We expect the default gateway too self.assertEqual(gateways[0]['default'], True) self.assertEqual(gateways[1]['name'], gw1[self.gw_resource]['name']) self.assertEqual(gateways[2]['name'], gw2[self.gw_resource]['name']) def test_list_network_gateway_with_multiple_connections(self): self._test_list_network_gateway_with_multiple_connections( expected_gateways=2) def test_show_network_gateway_nsx_error_returns_404(self): invalid_id = 'b5afd4a9-eb71-4af7-a082-8fc625a35b61' req = self.new_show_request(networkgw.NETWORK_GATEWAYS, invalid_id) res = req.get_response(self.ext_api) self.assertEqual(exc.HTTPNotFound.code, res.status_int) def test_create_gateway_device(self): self.mock_get_gw_dev_status.return_value = True super(TestNetworkGateway, self).test_create_gateway_device( expected_status=networkgw_db.STATUS_ACTIVE) def test_create_gateway_device_status_down(self): self.mock_get_gw_dev_status.return_value = False super(TestNetworkGateway, self).test_create_gateway_device( expected_status=networkgw_db.STATUS_DOWN) def test_create_gateway_device_invalid_cert_returns_400(self): self.mock_create_gw_dev.side_effect = ( nsx_exc.InvalidSecurityCertificate) res = self._create_gateway_device( 'json', _uuid(), connector_type='stt', connector_ip='1.1.1.1', client_certificate='invalid_certificate', name='whatever') self.assertEqual(res.status_int, 400) def test_get_gateway_device(self): self.mock_get_gw_dev_status.return_value = True super(TestNetworkGateway, self).test_get_gateway_device( expected_status=networkgw_db.STATUS_ACTIVE) def test_get_gateway_device_status_down(self): self.mock_get_gw_dev_status.return_value = False super(TestNetworkGateway, self).test_get_gateway_device( expected_status=networkgw_db.STATUS_DOWN) def test_update_gateway_device(self): self.mock_get_gw_dev_status.return_value = True super(TestNetworkGateway, 
self).test_update_gateway_device( expected_status=networkgw_db.STATUS_ACTIVE) def test_update_gateway_device_status_down(self): self.mock_get_gw_dev_status.return_value = False super(TestNetworkGateway, self).test_update_gateway_device( expected_status=networkgw_db.STATUS_DOWN) def test_update_gateway_device_invalid_cert_returns_400(self): with self._gateway_device( name='whaterver', connector_type='stt', connector_ip='1.1.1.1', client_certificate='iminvalidbutiitdoesnotmatter') as dev: self.mock_update_gw_dev.side_effect = ( nsx_exc.InvalidSecurityCertificate) res = self._update_gateway_device( 'json', dev[self.dev_resource]['id'], client_certificate='invalid_certificate') self.assertEqual(res.status_int, 400) class TestNetworkGatewayPlugin(db_base_plugin_v2.NeutronDbPluginV2, networkgw_db.NetworkGatewayMixin): """Simple plugin class for testing db support for network gateway ext.""" supported_extension_aliases = ["network-gateway"] def __init__(self, **args): super(TestNetworkGatewayPlugin, self).__init__(**args) extensions.append_api_extensions_path([vmware.NSXEXT_PATH]) def delete_port(self, context, id, nw_gw_port_check=True): if nw_gw_port_check: port = self._get_port(context, id) self.prevent_network_gateway_port_deletion(context, port) super(TestNetworkGatewayPlugin, self).delete_port(context, id) vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_portsecurity.py0000666000175100017510000000420213244523345026631 0ustar zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.tests.unit.extensions import test_portsecurity as psec from vmware_nsx.common import sync from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_mh.apiclient import fake from vmware_nsx.tests.unit.nsx_v3 import test_constants as v3_constants from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3 from vmware_nsx.tests.unit import test_utils class PortSecurityTestCaseNSXv2(psec.PortSecurityDBTestCase): def setUp(self): test_utils.override_nsx_ini_test() # mock api client self.fc = fake.FakeClient(vmware.STUBS_PATH) self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True) instance = self.mock_nsx.start() instance.return_value.login.return_value = "the_cookie" # Avoid runs of the synchronizer looping call patch_sync = mock.patch.object(sync, '_start_loopingcall') patch_sync.start() instance.return_value.request.side_effect = self.fc.fake_request super(PortSecurityTestCaseNSXv2, self).setUp(vmware.PLUGIN_NAME) self.addCleanup(self.fc.reset_all) self.addCleanup(self.mock_nsx.stop) self.addCleanup(patch_sync.stop) class TestPortSecurityNSXv2(PortSecurityTestCaseNSXv2, psec.TestPortSecurity): pass class TestPortSecurityNSXv3(psec.TestPortSecurity, test_nsxv3.NsxV3PluginTestCaseMixin): def setUp(self, plugin=v3_constants.PLUGIN_NAME): super(TestPortSecurityNSXv3, self).setUp(plugin=plugin) vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_dhcp_mtu.py0000666000175100017510000001753113244523345025671 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import neutron.db.api as db from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import dhcp_mtu as ext_dhcp_mtu from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.tests.unit.nsx_v import test_plugin from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin' class DhcpMtuExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return ext_dhcp_mtu.get_extended_resources(version) class DhcpMtuExtensionTestCase(test_plugin.NsxVPluginV2TestCase): """Test API extension dhcp-mtu attribute of subnets.""" @mock.patch.object(edge_utils.EdgeManager, '_deploy_edge') def setUp(self, plugin=PLUGIN_NAME): ext_mgr = DhcpMtuExtensionManager() # This feature is enabled only since 6.2.3 with mock.patch.object(fake_vcns.FakeVcns, 'get_version', return_value="6.2.3"): super(DhcpMtuExtensionTestCase, self).setUp(ext_mgr=ext_mgr) def _create_subnet_with_dhcp_mtu(self, dhcp_mtu): with self.network() as net: tenant_id = net['network']['tenant_id'] net_id = net['network']['id'] data = {'subnet': {'network_id': net_id, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'name': 'test-mtu-subnet', 'tenant_id': tenant_id, 'dhcp_mtu': dhcp_mtu}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) return res def test_subnet_create_with_dhcp_mtu(self): for mtu in (68, 2000, 65535): res = 
self._create_subnet_with_dhcp_mtu(mtu) sub = self.deserialize(self.fmt, res) self.assertEqual(mtu, sub['subnet']['dhcp_mtu']) def test_subnet_create_with_invalid_dhcp_mtu_fail(self): res = self._create_subnet_with_dhcp_mtu(67) self.assertEqual(400, res.status_int) res = self._create_subnet_with_dhcp_mtu(100000) self.assertEqual(400, res.status_int) def test_subnet_update_with_dhcp_mtu(self): res = self._create_subnet_with_dhcp_mtu(2000) sub = self.deserialize(self.fmt, res) data = {'subnet': {'dhcp_mtu': 3000}} req = self.new_update_request('subnets', data, sub['subnet']['id']) updated_sub = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(3000, updated_sub['subnet']['dhcp_mtu']) def _create_subnet_with_dhcp_mtu_and_dns(self, dhcp_mtu, dns_search_domain): with self.network() as net: tenant_id = net['network']['tenant_id'] net_id = net['network']['id'] data = {'subnet': {'network_id': net_id, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'name': 'test-mtu-subnet', 'tenant_id': tenant_id, 'dhcp_mtu': dhcp_mtu, 'dns_search_domain': dns_search_domain}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) return res def test_subnet_create_with_dhcp_mtu_and_dns(self): res = self._create_subnet_with_dhcp_mtu_and_dns(2000, 'vmware.com') sub = self.deserialize(self.fmt, res) self.assertEqual(2000, sub['subnet']['dhcp_mtu']) self.assertEqual('vmware.com', sub['subnet']['dns_search_domain']) def test_subnet_update_with_dhcp_mtu_and_dns(self): res = self._create_subnet_with_dhcp_mtu_and_dns(2000, 'vmware.com') sub = self.deserialize(self.fmt, res) data = {'subnet': {'dhcp_mtu': 3000, 'dns_search_domain': 'eng.vmware.com'}} req = self.new_update_request('subnets', data, sub['subnet']['id']) updated_sub = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(3000, updated_sub['subnet']['dhcp_mtu']) self.assertEqual('eng.vmware.com', updated_sub['subnet']['dns_search_domain']) class 
DhcpMtuDBTestCase(test_db.NeutronDbPluginV2TestCase): def setUp(self): super(DhcpMtuDBTestCase, self).setUp() self.session = db.get_writer_session() def test_get_nsxv_subnet_ext_attributes_no_dhcp_mtu(self): with self.subnet() as sub: sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id']) self.assertIsNone(sub_binding) def test_add_nsxv_subnet_ext_attributes_dhcp_mtu(self): with self.subnet() as sub: nsxv_db.add_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dhcp_mtu=2000) sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id']) self.assertEqual(2000, sub_binding.dhcp_mtu) self.assertEqual(sub['subnet']['id'], sub_binding.subnet_id) def test_update_nsxv_subnet_ext_attributes_dhcp_mtu(self): with self.subnet() as sub: nsxv_db.add_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dhcp_mtu=2000) sub_binding = nsxv_db.update_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dhcp_mtu=3000) self.assertEqual(3000, sub_binding.dhcp_mtu) def test_add_nsxv_subnet_ext_attributes_dhcp_mtu_and_dns(self): with self.subnet() as sub: nsxv_db.add_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dhcp_mtu=2000, dns_search_domain='eng.vmware.com') sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id']) self.assertEqual(2000, sub_binding.dhcp_mtu) self.assertEqual('eng.vmware.com', sub_binding.dns_search_domain) self.assertEqual(sub['subnet']['id'], sub_binding.subnet_id) def test_update_nsxv_subnet_ext_attributes_dhcp_mtu_and_dns(self): with self.subnet() as sub: nsxv_db.add_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dhcp_mtu=2000, dns_search_domain='eng.vmware.com') sub_binding = nsxv_db.update_nsxv_subnet_ext_attributes( session=self.session, 
subnet_id=sub['subnet']['id'], dhcp_mtu=3000, dns_search_domain='nsx.vmware.com') self.assertEqual(3000, sub_binding.dhcp_mtu) self.assertEqual('nsx.vmware.com', sub_binding.dns_search_domain) vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_providernet.py0000666000175100017510000002162713244523345026430 0ustar zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import webob.exc from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import provider_net as pnet from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_mh import test_plugin as test_nsx_plugin class TestProvidernet(test_nsx_plugin.NsxPluginV2TestCase): def test_create_delete_provider_network_default_physical_net(self): data = {'network': {'name': 'net1', 'admin_state_up': True, 'tenant_id': 'admin', pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 411}} network_req = self.new_create_request('networks', data, self.fmt) net = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan') self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411) req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) def test_create_provider_network(self): data = {'network': {'name': 
'net1', 'admin_state_up': True, 'tenant_id': 'admin', pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 411, pnet.PHYSICAL_NETWORK: 'physnet1'}} network_req = self.new_create_request('networks', data, self.fmt) net = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan') self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411) self.assertEqual(net['network'][pnet.PHYSICAL_NETWORK], 'physnet1') # Test that we can create another provider network using the same # vlan_id on another physical network. data['network'][pnet.PHYSICAL_NETWORK] = 'physnet2' network_req = self.new_create_request('networks', data, self.fmt) net = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan') self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411) self.assertEqual(net['network'][pnet.PHYSICAL_NETWORK], 'physnet2') class TestMultiProviderNetworks(test_nsx_plugin.NsxPluginV2TestCase): def setUp(self, plugin=None): cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) super(TestMultiProviderNetworks, self).setUp() def test_create_network_provider(self): data = {'network': {'name': 'net1', pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual(network['network'][pnet.NETWORK_TYPE], 'vlan') self.assertEqual(network['network'][pnet.PHYSICAL_NETWORK], 'physnet1') self.assertEqual(network['network'][pnet.SEGMENTATION_ID], 1) self.assertNotIn(mpnet_apidef.SEGMENTS, network['network']) def test_create_network_provider_flat(self): data = {'network': {'name': 'net1', pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'physnet1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = 
self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE]) self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK]) self.assertEqual(0, network['network'][pnet.SEGMENTATION_ID]) self.assertNotIn(mpnet_apidef.SEGMENTS, network['network']) def test_create_network_single_multiple_provider(self): data = {'network': {'name': 'net1', mpnet_apidef.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}], 'tenant_id': 'tenant_one'}} net_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, net_req.get_response(self.api)) for provider_field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID]: self.assertNotIn(provider_field, network['network']) tz = network['network'][mpnet_apidef.SEGMENTS][0] self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan') self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1') self.assertEqual(tz[pnet.SEGMENTATION_ID], 1) # Tests get_network() net_req = self.new_show_request('networks', network['network']['id']) network = self.deserialize(self.fmt, net_req.get_response(self.api)) tz = network['network'][mpnet_apidef.SEGMENTS][0] self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan') self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1') self.assertEqual(tz[pnet.SEGMENTATION_ID], 1) def test_create_network_multprovider(self): data = {'network': {'name': 'net1', mpnet_apidef.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}, {pnet.NETWORK_TYPE: 'stt', pnet.PHYSICAL_NETWORK: 'physnet1'}], 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) tz = network['network'][mpnet_apidef.SEGMENTS] for tz in data['network'][mpnet_apidef.SEGMENTS]: for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID]: self.assertEqual(tz.get(field), 
tz.get(field)) # Tests get_network() net_req = self.new_show_request('networks', network['network']['id']) network = self.deserialize(self.fmt, net_req.get_response(self.api)) tz = network['network'][mpnet_apidef.SEGMENTS] for tz in data['network'][mpnet_apidef.SEGMENTS]: for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID]: self.assertEqual(tz.get(field), tz.get(field)) def test_create_network_with_provider_and_multiprovider_fail(self): data = {'network': {'name': 'net1', mpnet_apidef.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}], pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) self.assertEqual(res.status_int, 400) def test_create_network_duplicate_segments(self): data = {'network': {'name': 'net1', mpnet_apidef.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}, {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}], 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) self.assertEqual(res.status_int, 400) vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_maclearning.py0000666000175100017510000001261013244523345026337 0ustar zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.extensions import agent from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin from neutron_lib import context from oslo_config import cfg from vmware_nsx.api_client import version from vmware_nsx.common import sync from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_mh.apiclient import fake from vmware_nsx.tests.unit import test_utils class MacLearningExtensionManager(object): def get_resources(self): return agent.Agent.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class MacLearningDBTestCase(test_db_plugin.NeutronDbPluginV2TestCase): fmt = 'json' def setUp(self): test_utils.override_nsx_ini_full_test() cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) ext_mgr = MacLearningExtensionManager() # mock api client self.fc = fake.FakeClient(vmware.STUBS_PATH) self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True) instance = self.mock_nsx.start() # Avoid runs of the synchronizer looping call patch_sync = mock.patch.object(sync, '_start_loopingcall') patch_sync.start() # Emulate tests against NSX 2.x instance.return_value.get_version.return_value = version.Version("3.0") instance.return_value.request.side_effect = self.fc.fake_request cfg.CONF.set_override('metadata_mode', None, 'NSX') self.addCleanup(self.fc.reset_all) super(MacLearningDBTestCase, self).setUp(plugin=vmware.PLUGIN_NAME, ext_mgr=ext_mgr) self.adminContext = context.get_admin_context() def test_create_with_mac_learning(self): with self.port(arg_list=('mac_learning_enabled',), mac_learning_enabled=True) as port: # Validate create operation response self.assertEqual(True, port['port']['mac_learning_enabled']) # Verify that db operation successfully set mac learning state req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, 
req.get_response(self.api)) self.assertEqual(True, sport['port']['mac_learning_enabled']) def test_create_and_show_port_without_mac_learning(self): with self.port() as port: req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertNotIn('mac_learning_enabled', sport['port']) def test_update_port_with_mac_learning(self): with self.port(arg_list=('mac_learning_enabled',), mac_learning_enabled=False) as port: data = {'port': {'mac_learning_enabled': True}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(True, res['port']['mac_learning_enabled']) def test_update_preexisting_port_with_mac_learning(self): with self.port() as port: req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertNotIn('mac_learning_enabled', sport['port']) data = {'port': {'mac_learning_enabled': True}} req = self.new_update_request('ports', data, port['port']['id']) # Validate update operation response res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(True, res['port']['mac_learning_enabled']) # Verify that db operation successfully updated mac learning state req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(True, sport['port']['mac_learning_enabled']) def test_list_ports(self): # for this test we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) no_mac_learning_p = (lambda: self.port(arg_list=('mac_learning_enabled',), mac_learning_enabled=True)) with no_mac_learning_p(), no_mac_learning_p(), no_mac_learning_p(): for port in self._list('ports')['ports']: self.assertEqual(True, port['mac_learning_enabled']) def test_show_port(self): with self.port(arg_list=('mac_learning_enabled',), 
mac_learning_enabled=True) as p: port_res = self._show('ports', p['port']['id'])['port'] self.assertEqual(True, port_res['mac_learning_enabled']) vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_security_group_policy.py0000666000175100017510000002455713244523345030536 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg import webob.exc from neutron.extensions import securitygroup as ext_sg from neutron.tests.unit.api import test_extensions from neutron.tests.unit.extensions import test_securitygroup from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from vmware_nsx.extensions import nsxpolicy from vmware_nsx.extensions import securitygrouplogging as ext_logging from vmware_nsx.extensions import securitygrouppolicy as ext_policy from vmware_nsx.tests.unit.nsx_v import test_plugin from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin' class SecGroupPolicyExtensionTestCase( test_plugin.NsxVPluginV2TestCase, test_securitygroup.SecurityGroupDBTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None): cfg.CONF.set_override('use_nsx_policies', True, group='nsxv') cfg.CONF.set_override('default_policy_id', 'policy-1', group='nsxv') # This feature is enabled only since 6.2 with mock.patch.object(fake_vcns.FakeVcns, 'get_version', return_value="6.2.3"): 
super(SecGroupPolicyExtensionTestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr) self._tenant_id = 'foobar' # add policy & logging security group attribute ext_sg.Securitygroup().update_attributes_map( ext_policy.RESOURCE_ATTRIBUTE_MAP) ext_sg.Securitygroup().update_attributes_map( ext_logging.RESOURCE_ATTRIBUTE_MAP) def _create_secgroup_with_policy(self, policy_id, description=None, logging=False): body = {'security_group': {'name': 'sg-policy', 'tenant_id': self._tenant_id, 'policy': policy_id, 'description': description if description else '', 'logging': logging}} security_group_req = self.new_create_request('security-groups', body) return security_group_req.get_response(self.ext_api) def _get_secgroup_with_policy(self): policy_id = 'policy-5' res = self._create_secgroup_with_policy(policy_id) return self.deserialize(self.fmt, res) def test_secgroup_create_with_policy(self): policy_id = 'policy-5' res = self._create_secgroup_with_policy(policy_id) sg = self.deserialize(self.fmt, res) self.assertEqual(policy_id, sg['security_group']['policy']) self.assertEqual('dummy', sg['security_group']['description']) def test_secgroup_create_with_policyand_desc(self): policy_id = 'policy-5' desc = 'test' res = self._create_secgroup_with_policy(policy_id, description=desc) sg = self.deserialize(self.fmt, res) self.assertEqual(policy_id, sg['security_group']['policy']) self.assertEqual(desc, sg['security_group']['description']) def test_secgroup_create_without_policy(self): res = self._create_secgroup_with_policy(None) self.assertEqual(400, res.status_int) def test_secgroup_create_with_illegal_policy(self): policy_id = 'bad-policy' with mock.patch(PLUGIN_NAME + '.get_nsx_policy', side_effect=n_exc.ObjectNotFound(id=policy_id)): res = self._create_secgroup_with_policy(policy_id) self.assertEqual(400, res.status_int) def test_secgroup_create_with_policy_and_logging(self): # We do not support policy & logging together policy_id = 'policy-5' res = 
self._create_secgroup_with_policy(policy_id, logging=True) self.assertEqual(400, res.status_int) def test_secgroup_update_with_policy(self): # Test that updating the policy is allowed old_policy = 'policy-5' new_policy = 'policy-6' res = self._create_secgroup_with_policy(old_policy) sg = self.deserialize(self.fmt, res) data = {'security_group': {'policy': new_policy}} req = self.new_update_request('security-groups', data, sg['security_group']['id']) updated_sg = self.deserialize(self.fmt, req.get_response(self.ext_api)) self.assertEqual(new_policy, updated_sg['security_group']['policy']) def test_secgroup_update_no_policy_change(self): # Test updating without changing the policy old_policy = 'policy-5' desc = 'abc' res = self._create_secgroup_with_policy(old_policy) sg = self.deserialize(self.fmt, res) data = {'security_group': {'description': desc}} req = self.new_update_request('security-groups', data, sg['security_group']['id']) updated_sg = self.deserialize(self.fmt, req.get_response(self.ext_api)) self.assertEqual(old_policy, updated_sg['security_group']['policy']) self.assertEqual(desc, updated_sg['security_group']['description']) def test_secgroup_update_remove_policy(self): # removing the policy is not allowed sg = self._get_secgroup_with_policy() data = {'security_group': {'policy': None}} req = self.new_update_request('security-groups', data, sg['security_group']['id']) res = req.get_response(self.ext_api) self.assertEqual(400, res.status_int) def test_secgroup_update_add_logging(self): # We do not support policy & logging together sg = self._get_secgroup_with_policy() data = {'security_group': {'logging': True}} req = self.new_update_request('security-groups', data, sg['security_group']['id']) res = req.get_response(self.ext_api) self.assertEqual(400, res.status_int) def test_non_admin_cannot_delete_policy_sg_and_admin_can(self): sg = self._get_secgroup_with_policy() sg_id = sg['security_group']['id'] # Try deleting the request as a normal user returns 
forbidden # as a tenant is not allowed to delete this. ctx = context.Context('', self._tenant_id) self._delete('security-groups', sg_id, expected_code=webob.exc.HTTPForbidden.code, neutron_context=ctx) # can be deleted though as admin self._delete('security-groups', sg_id, expected_code=webob.exc.HTTPNoContent.code) def test_create_rule(self): sg = self._get_secgroup_with_policy() rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', constants.PROTO_NAME_TCP, '22', '22') res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(400, res.status_int) class SecGroupPolicyExtensionTestCaseWithRules( SecGroupPolicyExtensionTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None): cfg.CONF.set_override('allow_tenant_rules_with_policy', True, group='nsxv') super(SecGroupPolicyExtensionTestCaseWithRules, self).setUp( plugin=plugin, ext_mgr=ext_mgr) def test_secgroup_create_without_policy(self): # in case allow_tenant_rules_with_policy is True, it is allowed to # create a regular sg desc = 'test' res = self._create_secgroup_with_policy(None, description=desc) sg = self.deserialize(self.fmt, res) self.assertIsNone(sg['security_group']['policy']) self.assertEqual(desc, sg['security_group']['description']) def test_secgroup_create_without_policy_update_policy(self): # Create a regular security group. 
adding the policy later should fail res = self._create_secgroup_with_policy(None) sg = self.deserialize(self.fmt, res) data = {'security_group': {'policy': 'policy-1'}} req = self.new_update_request('security-groups', data, sg['security_group']['id']) res = req.get_response(self.ext_api) self.assertEqual(400, res.status_int) def test_secgroup_create_without_policy_and_rule(self): # Test that regular security groups can have rules res = self._create_secgroup_with_policy(None) sg = self.deserialize(self.fmt, res) self.assertIsNone(sg['security_group']['policy']) rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', constants.PROTO_NAME_TCP, '22', '22') res = self._create_security_group_rule(self.fmt, rule) rule_data = self.deserialize(self.fmt, res) self.assertEqual( sg['security_group']['id'], rule_data['security_group_rule']['security_group_id']) class NsxPolExtensionManager(object): def get_resources(self): return nsxpolicy.Nsxpolicy.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class TestNsxPolicies(test_plugin.NsxVPluginV2TestCase): def setUp(self, plugin=None): super(TestNsxPolicies, self).setUp() ext_mgr = NsxPolExtensionManager() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) def test_get_policy(self): id = 'policy-1' req = self.new_show_request('nsx-policies', id) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) policy = res['nsx_policy'] self.assertEqual(id, policy['id']) def test_list_policies(self): req = self.new_list_request('nsx-policies') res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertIn('nsx_policies', res) # the fake_vcns api returns 3 policies self.assertEqual(3, len(res['nsx_policies'])) vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_addresspairs.py0000666000175100017510000002075413244523345026553 0ustar zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import port_security as psec from oslo_config import cfg from neutron.tests.unit.db import test_allowedaddresspairs_db as ext_pairs from vmware_nsx.tests.unit.nsx_mh import test_plugin as test_nsx_plugin from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsx_v_plugin from vmware_nsx.tests.unit.nsx_v3 import test_constants as v3_constants from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_v3_plugin class TestAllowedAddressPairsNSXv2(test_nsx_plugin.NsxPluginV2TestCase, ext_pairs.TestAllowedAddressPairs): # TODO(arosen): move to ext_pairs.TestAllowedAddressPairs once all # plugins do this correctly. 
def test_create_port_no_allowed_address_pairs(self): with self.network() as net: res = self._create_port(self.fmt, net['network']['id']) port = self.deserialize(self.fmt, res) self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], []) self._delete('ports', port['port']['id']) def test_create_port_security_false_allowed_address_pairs(self): self.skipTest('TBD') class TestAllowedAddressPairsNSXv3(test_v3_plugin.NsxV3PluginTestCaseMixin, ext_pairs.TestAllowedAddressPairs): def setUp(self, plugin=v3_constants.PLUGIN_NAME, ext_mgr=None, service_plugins=None): super(TestAllowedAddressPairsNSXv3, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) def test_create_bad_address_pairs_with_cidr(self): address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1/24'}] self._create_port_with_address_pairs(address_pairs, 400) def test_update_add_bad_address_pairs_with_cidr(self): with self.network() as net: res = self._create_port(self.fmt, net['network']['id']) port = self.deserialize(self.fmt, res) address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1/24'}] update_port = {'port': {addr_apidef.ADDRESS_PAIRS: address_pairs}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 400) self._delete('ports', port['port']['id']) def test_create_port_security_false_allowed_address_pairs(self): self.skipTest('TBD') class TestAllowedAddressPairsNSXv(test_nsx_v_plugin.NsxVPluginV2TestCase, ext_pairs.TestAllowedAddressPairs): def setUp(self, plugin='vmware_nsx.plugin.NsxVPlugin', ext_mgr=None, service_plugins=None): super(TestAllowedAddressPairsNSXv, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) def test_create_port_security_false_allowed_address_pairs(self): self.skipTest('TBD') def test_update_port_security_off_address_pairs(self): self.skipTest('Not supported') def 
test_create_overlap_with_fixed_ip(self): address_pairs = [{'ip_address': '10.0.0.2'}] with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=False) as subnet: fixed_ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.2'}] res = self._create_port(self.fmt, network['network']['id'], arg_list=(addr_apidef.ADDRESS_PAIRS, 'fixed_ips'), allowed_address_pairs=address_pairs, fixed_ips=fixed_ips) self.assertEqual(res.status_int, 201) port = self.deserialize(self.fmt, res) self._delete('ports', port['port']['id']) def test_create_port_allowed_address_pairs(self): with self.network() as net: address_pairs = [{'ip_address': '10.0.0.1'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=(addr_apidef.ADDRESS_PAIRS,), allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) address_pairs[0]['mac_address'] = port['port']['mac_address'] self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], address_pairs) self._delete('ports', port['port']['id']) def _test_create_port_remove_allowed_address_pairs(self, update_value): with self.network() as net: address_pairs = [{'ip_address': '10.0.0.1'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=(addr_apidef.ADDRESS_PAIRS,), allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) update_port = {'port': {addr_apidef.ADDRESS_PAIRS: []}} req = self.new_update_request('ports', update_port, port['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], []) self._delete('ports', port['port']['id']) def test_update_add_address_pairs(self): with self.network() as net: res = self._create_port(self.fmt, net['network']['id']) port = self.deserialize(self.fmt, res) address_pairs = [{'ip_address': '10.0.0.1'}] update_port = {'port': {addr_apidef.ADDRESS_PAIRS: address_pairs}} req = self.new_update_request('ports', update_port, port['port']['id']) 
port = self.deserialize(self.fmt, req.get_response(self.api)) address_pairs[0]['mac_address'] = port['port']['mac_address'] self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], address_pairs) self._delete('ports', port['port']['id']) def test_mac_configuration(self): address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}] self._create_port_with_address_pairs(address_pairs, 400) def test_equal_to_max_allowed_address_pair(self): cfg.CONF.set_default('max_allowed_address_pair', 3) address_pairs = [{'ip_address': '10.0.0.1'}, {'ip_address': '10.0.0.2'}, {'ip_address': '10.0.0.3'}] self._create_port_with_address_pairs(address_pairs, 201) def test_create_port_security_true_allowed_address_pairs(self): if self._skip_port_security: self.skipTest("Plugin does not implement port-security extension") with self.network() as net: address_pairs = [{'ip_address': '10.0.0.1'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=('port_security_enabled', addr_apidef.ADDRESS_PAIRS,), port_security_enabled=True, allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) self.assertTrue(port['port'][psec.PORTSECURITY]) address_pairs[0]['mac_address'] = port['port']['mac_address'] self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], address_pairs) self._delete('ports', port['port']['id']) vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_vnic_index.py0000666000175100017510000001363113244523345026211 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_db import exception as d_exc from oslo_utils import uuidutils from neutron.db import api as db_api from neutron.db import db_base_plugin_v2 from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin from neutron_lib.api import validators from neutron_lib import context as neutron_context from neutron_lib.plugins import directory from vmware_nsx.db import vnic_index_db from vmware_nsx.extensions import vnicindex as vnicidx from vmware_nsx.tests import unit as vmware DB_PLUGIN_KLASS = ('vmware_nsx.tests.unit.extensions.' 'test_vnic_index.VnicIndexTestPlugin') _uuid = uuidutils.generate_uuid class VnicIndexTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, vnic_index_db.VnicIndexDbMixin): supported_extension_aliases = ["vnic-index"] def update_port(self, context, id, port): p = port['port'] current_port = super(VnicIndexTestPlugin, self).get_port(context, id) vnic_idx = p.get(vnicidx.VNIC_INDEX) device_id = current_port['device_id'] if validators.is_attr_set(vnic_idx) and device_id != '': self._set_port_vnic_index_mapping( context, id, device_id, vnic_idx) with db_api.context_manager.writer.using(context): p = port['port'] ret_port = super(VnicIndexTestPlugin, self).update_port( context, id, port) vnic_idx = current_port.get(vnicidx.VNIC_INDEX) if (validators.is_attr_set(vnic_idx) and device_id != ret_port['device_id']): self._delete_port_vnic_index_mapping( context, id) return ret_port def delete_port(self, context, id): port_db = self.get_port(context, id) vnic_idx = port_db.get(vnicidx.VNIC_INDEX) if validators.is_attr_set(vnic_idx): self._delete_port_vnic_index_mapping(context, id) with db_api.context_manager.writer.using(context): super(VnicIndexTestPlugin, self).delete_port(context, id) class VnicIndexDbTestCase(test_db_plugin.NeutronDbPluginV2TestCase): def setUp(self, plugin=None, ext_mgr=None): plugin = 
plugin or DB_PLUGIN_KLASS cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) super(VnicIndexDbTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def _port_index_update(self, port_id, index): data = {'port': {'vnic_index': index}} req = self.new_update_request('ports', data, port_id) res = self.deserialize('json', req.get_response(self.api)) return res def test_vnic_index_db(self): plugin = directory.get_plugin() vnic_index = 2 device_id = _uuid() context = neutron_context.get_admin_context() with self.port(device_id=device_id, device_owner='compute:None') as port: port_id = port['port']['id'] res = self._port_index_update(port_id, vnic_index) self.assertEqual(res['port'][vnicidx.VNIC_INDEX], vnic_index) # Port should be associated with at most one vnic index self.assertRaises(d_exc.DBDuplicateEntry, plugin._set_port_vnic_index_mapping, context, port_id, device_id, 1) # Check that the call for _delete_port_vnic_index_mapping remove # the row from the table plugin._delete_port_vnic_index_mapping(context, port_id) self.assertIsNone(plugin._get_port_vnic_index(context, port_id)) def test_vnic_index_db_duplicate(self): plugin = directory.get_plugin() vnic_index = 2 device_id = _uuid() context = neutron_context.get_admin_context() with self.port(device_id=device_id, device_owner='compute:None') as port: port_id = port['port']['id'] res = self._port_index_update(port_id, vnic_index) self.assertEqual(res['port'][vnicidx.VNIC_INDEX], vnic_index) plugin._set_port_vnic_index_mapping(context, port_id, device_id, vnic_index) def test_vnic_index_db_duplicate_new_port(self): plugin = directory.get_plugin() vnic_index = 2 device_id = _uuid() context = neutron_context.get_admin_context() with self.port(device_id=device_id, device_owner='compute:None') as port: with self.port(device_id=device_id, device_owner='compute:None') as port1: port_id = port['port']['id'] res = self._port_index_update(port_id, vnic_index) self.assertEqual(res['port'][vnicidx.VNIC_INDEX], 
vnic_index) port_id1 = port1['port']['id'] plugin._set_port_vnic_index_mapping(context, port_id1, device_id, 2) self.assertIsNone(plugin._get_port_vnic_index(context, port_id)) self.assertEqual(vnic_index, plugin._get_port_vnic_index(context, port_id1)) class TestVnicIndex(VnicIndexDbTestCase): pass vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/__init__.py0000666000175100017510000000000013244523345024545 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_securitygroup.py0000666000175100017510000001763213244523345027014 0ustar zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.extensions import securitygroup as ext_sg from neutron.tests.unit.extensions import test_securitygroup as test_ext_sg from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3 from vmware_nsxlib import v3 as nsxlib from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import nsx_constants as consts # Pool of fake ns-groups uuids NSG_IDS = ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444', '55555555-5555-5555-5555-555555555555'] def _mock_create_and_list_nsgroups(test_method): nsgroups = [] def _create_nsgroup_mock(name, desc, tags, membership_criteria=None): nsgroup = {'id': NSG_IDS[len(nsgroups)], 'display_name': name, 'description': desc, 'tags': tags} nsgroups.append(nsgroup) return nsgroup def wrap(*args, **kwargs): with mock.patch( 'vmware_nsxlib.v3.security.NsxLibNsGroup.create' ) as create_nsgroup_mock: create_nsgroup_mock.side_effect = _create_nsgroup_mock with mock.patch( "vmware_nsxlib.v3.security.NsxLibNsGroup.list" ) as list_nsgroups_mock: list_nsgroups_mock.side_effect = lambda: nsgroups test_method(*args, **kwargs) return wrap class TestSecurityGroups(test_nsxv3.NsxV3PluginTestCaseMixin, test_ext_sg.TestSecurityGroups): pass class TestSecurityGroupsNoDynamicCriteria(test_nsxv3.NsxV3PluginTestCaseMixin, test_ext_sg.TestSecurityGroups): def setUp(self): super(TestSecurityGroupsNoDynamicCriteria, self).setUp() mock_nsx_version = mock.patch.object( nsxlib.NsxLib, 'feature_supported', return_value=False) mock_nsx_version.start() self._patchers.append(mock_nsx_version) @_mock_create_and_list_nsgroups @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.remove_member') @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.add_members') def test_create_port_with_multiple_security_groups(self, add_member_mock, remove_member_mock): super(TestSecurityGroupsNoDynamicCriteria, 
self).test_create_port_with_multiple_security_groups() # The first nsgroup is associated with the default secgroup, which is # not added to this port. calls = [mock.call(NSG_IDS[1], consts.TARGET_TYPE_LOGICAL_PORT, mock.ANY), mock.call(NSG_IDS[2], consts.TARGET_TYPE_LOGICAL_PORT, mock.ANY)] add_member_mock.assert_has_calls(calls, any_order=True) @_mock_create_and_list_nsgroups @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.remove_member') @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.add_members') def test_update_port_with_multiple_security_groups(self, add_member_mock, remove_member_mock): super(TestSecurityGroupsNoDynamicCriteria, self).test_update_port_with_multiple_security_groups() calls = [mock.call(NSG_IDS[0], consts.TARGET_TYPE_LOGICAL_PORT, mock.ANY), mock.call(NSG_IDS[1], consts.TARGET_TYPE_LOGICAL_PORT, mock.ANY), mock.call(NSG_IDS[2], consts.TARGET_TYPE_LOGICAL_PORT, mock.ANY)] add_member_mock.assert_has_calls(calls, any_order=True) remove_member_mock.assert_called_with( NSG_IDS[0], consts.TARGET_TYPE_LOGICAL_PORT, mock.ANY) @_mock_create_and_list_nsgroups @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.remove_member') @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.add_members') def test_update_port_remove_security_group_empty_list(self, add_member_mock, remove_member_mock): super(TestSecurityGroupsNoDynamicCriteria, self).test_update_port_remove_security_group_empty_list() add_member_mock.assert_called_with( NSG_IDS[1], consts.TARGET_TYPE_LOGICAL_PORT, mock.ANY) remove_member_mock.assert_called_with( NSG_IDS[1], consts.TARGET_TYPE_LOGICAL_PORT, mock.ANY) @_mock_create_and_list_nsgroups @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.add_members') def test_create_port_with_full_security_group(self, add_member_mock): def _add_member_mock(nsgroup, target_type, target_id): if nsgroup in NSG_IDS: raise nsxlib_exc.NSGroupIsFull(nsgroup_id=nsgroup) add_member_mock.side_effect = _add_member_mock with self.network() as net: with 
self.subnet(net): res = self._create_port(self.fmt, net['network']['id']) res_body = self.deserialize(self.fmt, res) self.assertEqual(400, res.status_int) self.assertEqual('SecurityGroupMaximumCapacityReached', res_body['NeutronError']['type']) @_mock_create_and_list_nsgroups @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.remove_member') @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.add_members') def test_update_port_with_full_security_group(self, add_member_mock, remove_member_mock): def _add_member_mock(nsgroup, target_type, target_id): if nsgroup == NSG_IDS[2]: raise nsxlib_exc.NSGroupIsFull(nsgroup_id=nsgroup) add_member_mock.side_effect = _add_member_mock with self.port() as port: with self.security_group() as sg1: with self.security_group() as sg2: data = {'port': {ext_sg.SECURITYGROUPS: [sg1['security_group']['id'], sg2['security_group']['id']]}} req = self.new_update_request( 'ports', data, port['port']['id']) res = req.get_response(self.api) res_body = self.deserialize(self.fmt, res) self.assertEqual(400, res.status_int) self.assertEqual('SecurityGroupMaximumCapacityReached', res_body['NeutronError']['type']) # Because the update has failed we excpect that the plugin will try to # revert any changes in the NSGroups - It is required to remove the # lport from any NSGroups which it was added to during that call. calls = [mock.call(NSG_IDS[1], consts.TARGET_TYPE_LOGICAL_PORT, mock.ANY), mock.call(NSG_IDS[2], consts.TARGET_TYPE_LOGICAL_PORT, mock.ANY)] remove_member_mock.assert_has_calls(calls, any_order=True) def test_create_security_group_rule_icmpv6_legacy_protocol_name(self): self.skipTest('not supported') vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_provider_security_groups.py0000666000175100017510000004623113244523345031245 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import webob.exc from neutron.db import api as db_api from neutron.db import db_base_plugin_v2 from neutron.db import securitygroups_db from neutron.extensions import securitygroup as ext_sg from neutron.tests.unit.extensions import test_securitygroup from neutron_lib import context from vmware_nsx.db import extended_security_group from vmware_nsx.extensions import providersecuritygroup as provider_sg from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsxv_plugin from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3_plugin PLUGIN_NAME = ('vmware_nsx.tests.unit.extensions.' 'test_provider_security_groups.ProviderSecurityGroupTestPlugin') # FIXME(arosen): make common mixin for extended_security_group_properties and # security_group_db_minxin. class ProviderSecurityGroupTestPlugin( db_base_plugin_v2.NeutronDbPluginV2, extended_security_group.ExtendedSecurityGroupPropertiesMixin, securitygroups_db.SecurityGroupDbMixin): supported_extension_aliases = ["security-group", "provider-security-group"] def create_security_group(self, context, security_group, default_sg=False): secgroup = security_group['security_group'] with db_api.context_manager.writer.using(context): # NOTE(arosen): a neutron security group by default adds rules # that allow egress traffic. 
We do not want this behavior for # provider security_groups if secgroup.get(provider_sg.PROVIDER) is True: secgroup_db = self.create_provider_security_group( context, security_group) else: secgroup_db = ( super(ProviderSecurityGroupTestPlugin, self ).create_security_group(context, security_group, default_sg)) self._process_security_group_properties_create(context, secgroup_db, secgroup, default_sg) return secgroup_db def create_port(self, context, port, l2gw_port_check=False): port_data = port['port'] with db_api.context_manager.writer.using(context): self._ensure_default_security_group_on_port(context, port) (sgids, provider_groups) = self._get_port_security_groups_lists( context, port) port_db = super(ProviderSecurityGroupTestPlugin, self).create_port( context, port) port_data.update(port_db) # handle adding security groups to port self._process_port_create_security_group( context, port_db, sgids) # handling adding provider security group to port if there are any self._process_port_create_provider_security_group( context, port_data, provider_groups) return port_data def update_port(self, context, id, port): with db_api.context_manager.writer.using(context): original_port = super(ProviderSecurityGroupTestPlugin, self).get_port(context, id) updated_port = super(ProviderSecurityGroupTestPlugin, self).update_port(context, id, port) self.update_security_group_on_port(context, id, port, original_port, updated_port) self._process_port_update_provider_security_group( context, port, original_port, updated_port) return self.get_port(context, id) def _make_port_dict(self, port, fields=None, process_extensions=True): port_data = super( ProviderSecurityGroupTestPlugin, self)._make_port_dict( port, fields=fields, process_extensions=process_extensions) self._remove_provider_security_groups_from_list(port_data) return port_data def delete_security_group(self, context, id): self._prevent_non_admin_delete_provider_sg(context, id) super(ProviderSecurityGroupTestPlugin, 
self).delete_security_group(context, id) def delete_security_group_rule(self, context, id): rule_db = self._get_security_group_rule(context, id) sg_id = rule_db['security_group_id'] self._prevent_non_admin_delete_provider_sg(context, sg_id) return super(ProviderSecurityGroupTestPlugin, self).delete_security_group_rule(context, id) def create_security_group_rule(self, context, security_group_rule): id = security_group_rule['security_group_rule']['security_group_id'] self._prevent_non_admin_delete_provider_sg(context, id) return super(ProviderSecurityGroupTestPlugin, self).create_security_group_rule(context, security_group_rule) class ProviderSecurityGroupExtTestCase( test_securitygroup.SecurityGroupDBTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None): super(ProviderSecurityGroupExtTestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr) self._tenant_id = 'foobar' # add provider group attributes ext_sg.Securitygroup().update_attributes_map( provider_sg.EXTENDED_ATTRIBUTES_2_0) def _create_provider_security_group(self): body = {'security_group': {'name': 'provider-deny', 'tenant_id': self._tenant_id, 'description': 'foobarzzkk', 'provider': True}} security_group_req = self.new_create_request('security-groups', body) return self.deserialize(self.fmt, security_group_req.get_response(self.ext_api)) def test_create_provider_security_group(self): # confirm this attribute is true provider_secgroup = self._create_provider_security_group() self.assertTrue(provider_secgroup['security_group']['provider']) # provider security groups have no rules by default which is different # from normal neutron security groups which by default include a rule # to allow egress traffic. We confirm this here. 
self.assertEqual( provider_secgroup['security_group']['security_group_rules'], []) def test_create_provider_security_groups_same_tenant(self): provider_secgroup = self._create_provider_security_group() self.assertTrue(provider_secgroup['security_group']['provider']) # Verify that another one can also be created for the same tenant provider_secgroup2 = self._create_provider_security_group() self.assertTrue(provider_secgroup2['security_group']['provider']) def test_create_port_gets_provider_sg(self): # need to create provider security group first. provider_secgroup = self._create_provider_security_group() with self.port(tenant_id=self._tenant_id) as p: # check that the provider security group is on port resource. self.assertEqual(1, len(p['port']['provider_security_groups'])) self.assertEqual(provider_secgroup['security_group']['id'], p['port']['provider_security_groups'][0]) # confirm there is still a default security group. self.assertEqual(len(p['port']['security_groups']), 1) def test_create_port_gets_multi_provider_sg(self): # need to create provider security groups first. provider_secgroup1 = self._create_provider_security_group() provider_secgroup2 = self._create_provider_security_group() with self.port(tenant_id=self._tenant_id) as p: # check that the provider security group is on port resource. self.assertEqual(2, len(p['port']['provider_security_groups'])) self.assertIn(provider_secgroup1['security_group']['id'], p['port']['provider_security_groups']) self.assertIn(provider_secgroup2['security_group']['id'], p['port']['provider_security_groups']) # confirm there is still a default security group. 
self.assertEqual(len(p['port']['security_groups']), 1) def test_create_port_with_no_provider_sg(self): self._create_provider_security_group() with self.port(tenant_id=self._tenant_id, arg_list=('provider_security_groups', ), provider_security_groups=[]) as p1: self.assertEqual([], p1['port']['provider_security_groups']) with self.port(tenant_id=self._tenant_id, arg_list=('provider_security_groups', ), provider_security_groups=None) as p1: self.assertEqual([], p1['port']['provider_security_groups']) def test_update_port_remove_provider_sg_with_empty_list(self): # need to create provider security group first. self._create_provider_security_group() with self.port(tenant_id=self._tenant_id) as p: body = {'port': {'provider_security_groups': []}} req = self.new_update_request('ports', body, p['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) # confirm that the group has been removed. self.assertEqual([], port['port']['provider_security_groups']) def test_update_port_remove_provider_sg_with_none(self): # need to create provider security group first. self._create_provider_security_group() with self.port(tenant_id=self._tenant_id) as p: body = {'port': {'provider_security_groups': None}} req = self.new_update_request('ports', body, p['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) # confirm that the group has been removed. 
self.assertEqual([], port['port']['provider_security_groups']) def test_cannot_update_port_with_provider_group_as_sec_group(self): with self.port(tenant_id=self._tenant_id) as p: provider_secgroup = self._create_provider_security_group() sg_id = provider_secgroup['security_group']['id'] body = {'port': {'security_groups': [sg_id]}} req = self.new_update_request('ports', body, p['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_cannot_update_port_with_sec_group_as_provider(self): with self.security_group() as sg1: with self.port(tenant_id=self._tenant_id) as p: sg_id = sg1['security_group']['id'] body = {'port': {'provider_security_groups': [sg_id]}} req = self.new_update_request('ports', body, p['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_cannot_update_port_with_different_tenant_provider_secgroup(self): with self.port(tenant_id=self._tenant_id) as p: tmp_tenant_id = self._tenant_id self._tenant_id += "-alt" pvd_sg = self._create_provider_security_group() self._tenant_id = tmp_tenant_id body = {'port': {'provider_security_groups': [ pvd_sg['security_group']['id']]}} ctx = context.Context('', self._tenant_id) req = self.new_update_request('ports', body, p['port']['id'], context=ctx) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) def test_update_port_security_groups_only(self): # We want to make sure that modifying security-groups on the port # doesn't impact the provider security-group on this port. 
provider_secgroup = self._create_provider_security_group() with self.security_group() as sg1: with self.port(tenant_id=self._tenant_id) as p: sg_id = sg1['security_group']['id'] body = {'port': {'security_groups': [sg_id]}} req = self.new_update_request('ports', body, p['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual( [provider_secgroup['security_group']['id']], port['port']['provider_security_groups']) def test_update_port_security_groups(self): with self.security_group() as sg1: with self.port(tenant_id=self._tenant_id) as p: # Port created before provider secgroup is created, so the port # would not be associated with the pvd secgroup at this point. provider_secgroup = self._create_provider_security_group() pvd_sg_id = provider_secgroup['security_group']['id'] sg_id = sg1['security_group']['id'] body = {'port': { 'security_groups': [sg_id], 'provider_security_groups': [pvd_sg_id]} } req = self.new_update_request('ports', body, p['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual([pvd_sg_id], port['port']['provider_security_groups']) self.assertEqual([sg_id], port['port']['security_groups']) def test_non_admin_cannot_delete_provider_sg_and_admin_can(self): provider_secgroup = self._create_provider_security_group() pvd_sg_id = provider_secgroup['security_group']['id'] # Try deleting the request as the normal tenant returns forbidden # as a tenant is not allowed to delete this. 
ctx = context.Context('', self._tenant_id) self._delete('security-groups', pvd_sg_id, expected_code=webob.exc.HTTPForbidden.code, neutron_context=ctx) # can be deleted though as admin self._delete('security-groups', pvd_sg_id, expected_code=webob.exc.HTTPNoContent.code) def test_non_admin_cannot_delete_provider_sg_rule(self): provider_secgroup = self._create_provider_security_group() pvd_sg_id = provider_secgroup['security_group']['id'] data = {'security_group_rule': {'security_group_id': pvd_sg_id, 'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4', 'tenant_id': self._tenant_id}} req = self.new_create_request('security-group-rules', data) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) sg_rule_id = res['security_group_rule']['id'] # Try deleting the request as the normal tenant returns forbidden # as a tenant is not allowed to delete this. ctx = context.Context('', self._tenant_id) self._delete('security-group-rules', sg_rule_id, expected_code=webob.exc.HTTPForbidden.code, neutron_context=ctx) # can be deleted though as admin self._delete('security-group-rules', sg_rule_id, expected_code=webob.exc.HTTPNoContent.code) def test_non_admin_cannot_add_provider_sg_rule(self): provider_secgroup = self._create_provider_security_group() pvd_sg_id = provider_secgroup['security_group']['id'] data = {'security_group_rule': {'security_group_id': pvd_sg_id, 'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4', 'tenant_id': self._tenant_id}} req = self.new_create_request( 'security-group-rules', data) req.environ['neutron.context'] = context.Context('', self._tenant_id) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int) class TestNSXv3ProviderSecurityGrp(test_nsxv3_plugin.NsxV3PluginTestCaseMixin, ProviderSecurityGroupExtTestCase): def test_update_port_remove_provider_sg(self): # need to create provider security group first. 
self._create_provider_security_group() with self.port(tenant_id=self._tenant_id) as p: body = {'port': {'provider_security_groups': []}} req = self.new_update_request('ports', body, p['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) # confirm that the group has been removed. self.assertEqual([], port['port']['provider_security_groups']) # make sure that the security groups did not contain the provider # security group self.assertEqual(p['port']['security_groups'], port['port']['security_groups']) class TestNSXvProviderSecurityGroup(test_nsxv_plugin.NsxVPluginV2TestCase, ProviderSecurityGroupExtTestCase): def test_create_provider_security_group(self): _create_section_tmp = self.fc2.create_section def _create_section(*args, **kwargs): return _create_section_tmp(*args, **kwargs) with mock.patch.object(self.fc2, 'create_section', side_effect=_create_section) as create_sec_mock: super(TestNSXvProviderSecurityGroup, self).test_create_provider_security_group() create_sec_mock.assert_called_with('ip', mock.ANY, insert_top=True, insert_before=mock.ANY) def test_create_provider_security_group_rule(self): provider_secgroup = self._create_provider_security_group() sg_id = provider_secgroup['security_group']['id'] _create_nsx_rule_tmp = self.plugin._create_nsx_rule def m_create_nsx_rule(*args, **kwargs): return _create_nsx_rule_tmp(*args, **kwargs) with mock.patch.object(self.plugin, '_create_nsx_rule', side_effect=m_create_nsx_rule) as create_rule_m: with self.security_group_rule(security_group_id=sg_id): create_rule_m.assert_called_with(mock.ANY, mock.ANY, logged=mock.ANY, action='deny') vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_dns_search_domain.py0000666000175100017510000001152513244523345027523 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import neutron.db.api as db from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import dns_search_domain as ext_dns_search_domain from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.tests.unit.nsx_v import test_plugin PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin' class DnsSearchDomainExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return ext_dns_search_domain.get_extended_resources(version) class DnsSearchDomainExtensionTestCase(test_plugin.NsxVPluginV2TestCase): """Test API extension dns-search-domain attribute.""" @mock.patch.object(edge_utils.EdgeManager, '_deploy_edge') def setUp(self, plugin=PLUGIN_NAME): ext_mgr = DnsSearchDomainExtensionManager() super(DnsSearchDomainExtensionTestCase, self).setUp(ext_mgr=ext_mgr) def _create_subnet_with_dns_search_domain(self, dns_search_domain): with self.network() as net: tenant_id = net['network']['tenant_id'] net_id = net['network']['id'] data = {'subnet': {'network_id': net_id, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'name': 'test-dns-search-domain-subnet', 'tenant_id': tenant_id, 'dns_search_domain': dns_search_domain}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) return res def test_subnet_create_with_dns_search_domain(self): res = self._create_subnet_with_dns_search_domain('vmware.com') sub = self.deserialize(self.fmt, res) 
self.assertEqual('vmware.com', sub['subnet']['dns_search_domain']) def test_subnet_create_with_invalid_dns_search_domain_fail(self): res = self._create_subnet_with_dns_search_domain('vmw@re.com') self.assertEqual(400, res.status_int) def test_subnet_update_with_dns_search_domain(self): res = self._create_subnet_with_dns_search_domain('vmware.com') sub = self.deserialize(self.fmt, res) data = {'subnet': {'dns_search_domain': 'eng.vmware.com'}} req = self.new_update_request('subnets', data, sub['subnet']['id']) updated_sub = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual('eng.vmware.com', updated_sub['subnet']['dns_search_domain']) class DnsSearchDomainDBTestCase(test_db.NeutronDbPluginV2TestCase): def setUp(self): super(DnsSearchDomainDBTestCase, self).setUp() self.session = db.get_writer_session() def test_get_nsxv_subnet_ext_attributes_no_dns_search_domain(self): with self.subnet() as sub: sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id']) self.assertIsNone(sub_binding) def test_add_nsxv_subnet_ext_attributes_dns_search_domain(self): with self.subnet() as sub: nsxv_db.add_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dns_search_domain='eng.vmware.com') sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id']) self.assertEqual('eng.vmware.com', sub_binding.dns_search_domain) self.assertEqual(sub['subnet']['id'], sub_binding.subnet_id) def test_update_nsxv_subnet_ext_attributes_dns_search_domain(self): with self.subnet() as sub: nsxv_db.add_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dns_search_domain='eng.vmware.com') sub_binding = nsxv_db.update_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dns_search_domain='nsx.vmware.com') self.assertEqual('nsx.vmware.com', sub_binding.dns_search_domain) 
vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_secgroup_rule_local_ip_prefix.py0000666000175100017510000001446113244523345032162 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import webob.exc from oslo_utils import uuidutils from neutron.db import api as db_api from neutron.db import db_base_plugin_v2 from neutron.db import securitygroups_db from neutron.extensions import securitygroup as ext_sg from neutron.tests.unit.extensions import test_securitygroup from neutron_lib import constants as const from neutron_lib.plugins import directory from vmware_nsx.db import extended_security_group_rule as ext_rule_db from vmware_nsx.extensions import secgroup_rule_local_ip_prefix as ext_loip from vmware_nsx.plugins.nsx_v.vshield import securitygroup_utils from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsxv_plugin from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3_plugin PLUGIN_NAME = ('vmware_nsx.tests.unit.extensions.' 
'test_secgroup_rule_local_ip_prefix.ExtendedRuleTestPlugin') _uuid = uuidutils.generate_uuid class ExtendedRuleTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, ext_rule_db.ExtendedSecurityGroupRuleMixin, securitygroups_db.SecurityGroupDbMixin): supported_extension_aliases = ["security-group", "secgroup-rule-local-ip-prefix"] def create_security_group_rule(self, context, security_group_rule): rule = security_group_rule['security_group_rule'] self._check_local_ip_prefix(context, rule) with db_api.context_manager.writer.using(context): res = super(ExtendedRuleTestPlugin, self).create_security_group_rule( context, security_group_rule) self._process_security_group_rule_properties(context, res, rule) return res class LocalIPPrefixExtTestCase(test_securitygroup.SecurityGroupDBTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None): super(LocalIPPrefixExtTestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr) ext_sg.Securitygroup().update_attributes_map( ext_loip.RESOURCE_ATTRIBUTE_MAP) def _build_ingress_rule_with_local_ip_prefix(self, security_group_id, local_ip_prefix, remote_ip_prefix, direction='ingress'): rule = self._build_security_group_rule( security_group_id, remote_ip_prefix=remote_ip_prefix, direction=direction, proto=const.PROTO_NAME_UDP) rule['security_group_rule']['local_ip_prefix'] = local_ip_prefix return rule def test_raise_rule_not_ingress_when_local_ip_specified(self): local_ip_prefix = '239.255.0.0/16' remote_ip_prefix = '10.0.0.0/24' with self.security_group() as sg: rule = self._build_ingress_rule_with_local_ip_prefix( sg['security_group']['id'], local_ip_prefix, remote_ip_prefix, direction='egress') res = self._create_security_group_rule(self.fmt, rule) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_rule_with_local_ip_prefix(self): local_ip_prefix = '239.255.0.0/16' remote_ip_prefix = '10.0.0.0/24' with self.security_group() as sg: rule = self._build_ingress_rule_with_local_ip_prefix( sg['security_group']['id'], 
local_ip_prefix, remote_ip_prefix) res = self._make_security_group_rule(self.fmt, rule) self.assertEqual(local_ip_prefix, res['security_group_rule']['local_ip_prefix']) class TestNsxVExtendedSGRule(test_nsxv_plugin.NsxVSecurityGroupsTestCase, LocalIPPrefixExtTestCase): def test_create_rule_with_local_ip_prefix(self): sg_utils = securitygroup_utils.NsxSecurityGroupUtils(None) local_ip_prefix = '239.255.0.0/16' plugin = directory.get_plugin() dest = {'type': 'Ipv4Address', 'value': local_ip_prefix} plugin.nsx_sg_utils.get_rule_config = mock.Mock( side_effect=sg_utils.get_rule_config) super(TestNsxVExtendedSGRule, self).test_create_rule_with_local_ip_prefix() plugin.nsx_sg_utils.get_rule_config.assert_called_with( source=mock.ANY, destination=dest, services=mock.ANY, name=mock.ANY, applied_to_ids=mock.ANY, flags=mock.ANY, logged=mock.ANY, action=mock.ANY, tag=mock.ANY) class TestNSXv3ExtendedSGRule(test_nsxv3_plugin.NsxV3PluginTestCaseMixin, LocalIPPrefixExtTestCase): def test_create_rule_with_local_ip_prefix(self): sg_rules = [ {'tenant_id': mock.ANY, 'project_id': mock.ANY, 'id': mock.ANY, 'port_range_min': None, 'local_ip_prefix': '239.255.0.0/16', 'ethertype': 'IPv4', 'protocol': u'udp', 'remote_ip_prefix': '10.0.0.0/24', 'port_range_max': None, 'security_group_id': mock.ANY, 'remote_group_id': None, 'direction': u'ingress', 'description': ''}] with mock.patch( "vmware_nsxlib.v3.security.NsxLibFirewallSection.create_rules", side_effect=test_nsxv3_plugin._mock_create_firewall_rules, ) as mock_rule: super(TestNSXv3ExtendedSGRule, self).test_create_rule_with_local_ip_prefix() mock_rule.assert_called_with( mock.ANY, # content mock.ANY, # firewall_section_id mock.ANY, # ns_group_id False, # logging 'ALLOW', # action sg_rules, # sg_rules mock.ANY) # ruleid_2_remote_nsgroup_map vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_metadata.py0000666000175100017510000003672613244523345025655 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netaddr from neutron_lib import constants from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from oslo_config import cfg import webob.exc from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import config class MetaDataTestCase(object): def _metadata_setup(self, mode=config.MetadataModes.DIRECT, on_demand=False): cfg.CONF.set_override('metadata_mode', mode, self.plugin.cfg_group) if hasattr(getattr(cfg.CONF, self.plugin.cfg_group), 'metadata_on_demand'): cfg.CONF.set_override('metadata_on_demand', on_demand, self.plugin.cfg_group) def _metadata_teardown(self): cfg.CONF.set_override('metadata_mode', None, self.plugin.cfg_group) if hasattr(getattr(cfg.CONF, self.plugin.cfg_group), 'metadata_on_demand'): cfg.CONF.set_override('metadata_on_demand', False, self.plugin.cfg_group) def _check_metadata(self, expected_subnets, expected_ports): subnets = self._list('subnets')['subnets'] self.assertEqual(len(subnets), expected_subnets) meta_net_id, meta_sub_id = None, None meta_cidr = netaddr.IPNetwork('169.254.0.0/16') for subnet in subnets: cidr = netaddr.IPNetwork(subnet['cidr']) if meta_cidr == cidr or meta_cidr in cidr.supernet(16): meta_sub_id = subnet['id'] meta_net_id = subnet['network_id'] break ports = self._list( 'ports', query_params='network_id=%s' % meta_net_id)['ports'] self.assertEqual(len(ports), expected_ports) meta_port_id = 
ports[0]['id'] if ports else None return meta_net_id, meta_sub_id, meta_port_id def test_router_add_interface_subnet_with_metadata_access(self): self._metadata_setup() self.test_router_add_interface_subnet() self._metadata_teardown() def test_router_add_interface_port_with_metadata_access(self): self._metadata_setup() self.test_router_add_interface_port() self._metadata_teardown() def test_router_add_interface_dupsubnet_returns_400_with_metadata(self): self._metadata_setup() self.test_router_add_interface_dup_subnet1_returns_400() self._metadata_teardown() def test_router_add_interface_overlapped_cidr_returns_400_with(self): self._metadata_setup() self.test_router_add_interface_overlapped_cidr_returns_400() self._metadata_teardown() def test_router_remove_interface_inuse_returns_409_with_metadata(self): self._metadata_setup() self.test_router_remove_interface_inuse_returns_409() self._metadata_teardown() def test_router_remove_iface_wrong_sub_returns_400_with_metadata(self): self._metadata_setup() self.test_router_remove_interface_wrong_subnet_returns_400() self._metadata_teardown() def test_router_delete_with_metadata_access(self): self._metadata_setup() self.test_router_delete() self._metadata_teardown() def test_router_delete_with_port_existed_returns_409_with_metadata(self): self._metadata_setup() self.test_router_delete_with_port_existed_returns_409() self._metadata_teardown() def test_delete_port_with_metadata(self): self._metadata_setup(config.MetadataModes.INDIRECT) with self.subnet() as s: with self.port(subnet=s, fixed_ips=[], device_id='1234', device_owner=constants.DEVICE_OWNER_DHCP) as port: self._delete('ports', port['port']['id']) self._metadata_teardown() def test_metadatata_network_created_with_router_interface_add(self): self._metadata_setup() with mock.patch.object(self._plugin_class, 'schedule_network') as f: with self.router() as r: with self.subnet() as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) r_ports 
= self._list('ports')['ports'] self.assertEqual(len(r_ports), 2) ips = [] for port in r_ports: ips.extend([netaddr.IPAddress(fixed_ip['ip_address']) for fixed_ip in port['fixed_ips']]) meta_cidr = netaddr.IPNetwork('169.254.0.0/16') self.assertTrue(any([ip in meta_cidr for ip in ips])) # Needed to avoid 409. self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], None) # Verify that there has been a schedule_network all for the # metadata network expected_net_name = 'meta-%s' % r['router']['id'] found = False for call in f.call_args_list: # The network data are the last of the positional arguments net_dict = call[0][-1] if net_dict['name'] == expected_net_name: self.assertFalse(net_dict['port_security_enabled']) self.assertFalse(net_dict['shared']) self.assertFalse(net_dict['tenant_id']) found = True break else: self.fail("Expected schedule_network call for metadata " "network %s not found" % expected_net_name) self.assertTrue(found) self._metadata_teardown() def test_metadata_network_create_rollback_on_create_subnet_failure(self): self._metadata_setup() with self.router() as r: with self.subnet() as s: # Raise a NeutronException (eg: NotFound). with mock.patch.object(self._plugin_class, 'create_subnet', side_effect=n_exc.NotFound): self._router_interface_action( 'add', r['router']['id'], s['subnet']['id'], None) # Ensure metadata network was removed. nets = self._list('networks')['networks'] self.assertEqual(len(nets), 1) # Needed to avoid 409. self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], None) self._metadata_teardown() def test_metadata_network_create_rollback_on_add_rtr_iface_failure(self): self._metadata_setup() with self.router() as r: with self.subnet() as s: # Save function being mocked. real_func = self._plugin_class.add_router_interface plugin_instance = directory.get_plugin() # Raise a NeutronException when adding metadata subnet # to router. 
def side_effect(*args): if args[-1]['subnet_id'] == s['subnet']['id']: # Do the real thing. return real_func(plugin_instance, *args) # Otherwise raise. raise api_exc.NsxApiException() with mock.patch.object(self._plugin_class, 'add_router_interface', side_effect=side_effect): self._router_interface_action( 'add', r['router']['id'], s['subnet']['id'], None) # Ensure metadata network was removed. nets = self._list('networks')['networks'] self.assertEqual(len(nets), 1) # Needed to avoid 409. self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], None) self._metadata_teardown() def test_metadata_network_removed_with_router_interface_remove(self): self._metadata_setup() with self.router() as r: with self.subnet() as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) meta_net_id, meta_sub_id, meta_port_id = self._check_metadata( expected_subnets=2, expected_ports=1) self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], None) self._show('networks', meta_net_id, webob.exc.HTTPNotFound.code) self._show('ports', meta_port_id, webob.exc.HTTPNotFound.code) self._show('subnets', meta_sub_id, webob.exc.HTTPNotFound.code) self._metadata_teardown() def test_metadata_network_remove_rollback_on_failure(self): self._metadata_setup() with self.router() as r: with self.subnet() as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) networks = self._list('networks')['networks'] for network in networks: if network['id'] != s['subnet']['network_id']: meta_net_id = network['id'] ports = self._list( 'ports', query_params='network_id=%s' % meta_net_id)['ports'] meta_port_id = ports[0]['id'] # Save function being mocked. real_func = self._plugin_class.remove_router_interface plugin_instance = directory.get_plugin() # Raise a NeutronException when removing metadata subnet # from router. def side_effect(*args): if args[-1].get('subnet_id') == s['subnet']['id']: # Do the real thing. 
return real_func(plugin_instance, *args) # Otherwise raise. raise api_exc.NsxApiException() with mock.patch.object(self._plugin_class, 'remove_router_interface', side_effect=side_effect): self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], None) # Metadata network and subnet should still be there. self._show('networks', meta_net_id, webob.exc.HTTPOk.code) self._show('ports', meta_port_id, webob.exc.HTTPOk.code) self._metadata_teardown() def test_metadata_network_with_update_subnet_dhcp_enable(self): self._metadata_setup(on_demand=True) with self.router() as r: # Create a DHCP-disabled subnet. with self.subnet(enable_dhcp=False) as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) meta_net_id, meta_sub_id, meta_port_id = self._check_metadata( expected_subnets=2, expected_ports=1) # Update subnet to DHCP-enabled. data = {'subnet': {'enable_dhcp': True}} req = self.new_update_request('subnets', data, s['subnet']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(True, res['subnet']['enable_dhcp']) self._check_metadata(expected_subnets=1, expected_ports=0) self._show('networks', meta_net_id, webob.exc.HTTPNotFound.code) self._show('ports', meta_port_id, webob.exc.HTTPNotFound.code) self._show('subnets', meta_sub_id, webob.exc.HTTPNotFound.code) self._metadata_teardown() def test_metadata_network_with_update_subnet_dhcp_disable(self): self._metadata_setup(on_demand=True) with self.router() as r: # Create a DHCP-enabled subnet. with self.subnet(enable_dhcp=True) as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) self._check_metadata(expected_subnets=1, expected_ports=0) # Update subnet to DHCP-disabled. 
data = {'subnet': {'enable_dhcp': False}} req = self.new_update_request('subnets', data, s['subnet']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(False, res['subnet']['enable_dhcp']) meta_net_id, meta_sub_id, meta_port_id = self._check_metadata( expected_subnets=2, expected_ports=1) self._show('networks', meta_net_id, webob.exc.HTTPOk.code) self._show('ports', meta_port_id, webob.exc.HTTPOk.code) self._show('subnets', meta_sub_id, webob.exc.HTTPOk.code) self._metadata_teardown() def test_metadata_dhcp_host_route(self): self._metadata_setup(config.MetadataModes.INDIRECT) subnets = self._list('subnets')['subnets'] with self.subnet() as s: with self.port(subnet=s, device_id='1234', device_owner=constants.DEVICE_OWNER_DHCP) as port: subnets = self._list('subnets')['subnets'] self.assertEqual(len(subnets), 1) subnet_ip_net = netaddr.IPNetwork(s['subnet']['cidr']) self.assertIn(netaddr.IPAddress( subnets[0]['host_routes'][0]['nexthop']), subnet_ip_net) self.assertEqual(subnets[0]['host_routes'][0]['destination'], '169.254.169.254/32') self._delete('ports', port['port']['id']) subnets = self._list('subnets')['subnets'] # Test that route is deleted after dhcp port is removed. self.assertEqual(len(subnets[0]['host_routes']), 0) self._metadata_teardown() vmware-nsx-12.0.1/vmware_nsx/tests/unit/extensions/test_qosqueues.py0000666000175100017510000003214513244523345026116 0ustar zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock from neutron_lib import context from oslo_config import cfg import webob.exc from neutron.tests.unit.api import test_extensions from vmware_nsx.db import qos_db from vmware_nsx.extensions import qos_queue as ext_qos from vmware_nsx.nsxlib import mh as nsxlib from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_mh import test_plugin as test_nsx_plugin class QoSTestExtensionManager(object): def get_resources(self): return ext_qos.Qos_queue.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class TestQoSQueue(test_nsx_plugin.NsxPluginV2TestCase): def setUp(self, plugin=None): cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) super(TestQoSQueue, self).setUp() ext_mgr = QoSTestExtensionManager() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) def _create_qos_queue(self, fmt, body, **kwargs): qos_queue = self.new_create_request('qos-queues', body) if (kwargs.get('set_context') and 'tenant_id' in kwargs): # create a specific auth context for this request qos_queue.environ['neutron.context'] = context.Context( '', kwargs['tenant_id']) return qos_queue.get_response(self.ext_api) @contextlib.contextmanager def qos_queue(self, name='foo', min='0', max='10', qos_marking=None, dscp='0', default=None, do_delete=True): body = {'qos_queue': {'tenant_id': 'tenant', 'name': name, 'min': min, 'max': max}} if qos_marking: body['qos_queue']['qos_marking'] = qos_marking if dscp: body['qos_queue']['dscp'] = dscp if default: body['qos_queue']['default'] = default res = self._create_qos_queue('json', body) qos_queue = self.deserialize('json', res) if res.status_int >= 400: raise webob.exc.HTTPClientError(code=res.status_int) yield qos_queue if do_delete: self._delete('qos-queues', qos_queue['qos_queue']['id']) def test_create_qos_queue(self): with 
self.qos_queue(name='fake_lqueue', min=34, max=44, qos_marking='untrusted', default=False) as q: self.assertEqual(q['qos_queue']['name'], 'fake_lqueue') self.assertEqual(q['qos_queue']['min'], 34) self.assertEqual(q['qos_queue']['max'], 44) self.assertEqual(q['qos_queue']['qos_marking'], 'untrusted') self.assertFalse(q['qos_queue']['default']) def test_create_trusted_qos_queue(self): with mock.patch.object(qos_db.LOG, 'info') as log: with mock.patch.object(nsxlib, 'do_request', return_value={"uuid": "fake_queue"}): with self.qos_queue(name='fake_lqueue', min=34, max=44, qos_marking='trusted', default=False) as q: self.assertIsNone(q['qos_queue']['dscp']) self.assertTrue(log.called) def test_create_qos_queue_name_exceeds_40_chars(self): name = 'this_is_a_queue_whose_name_is_longer_than_40_chars' with self.qos_queue(name=name) as queue: # Assert Neutron name is not truncated self.assertEqual(queue['qos_queue']['name'], name) def test_create_qos_queue_default(self): with self.qos_queue(default=True) as q: self.assertTrue(q['qos_queue']['default']) def test_create_qos_queue_two_default_queues_fail(self): with self.qos_queue(default=True): body = {'qos_queue': {'tenant_id': 'tenant', 'name': 'second_default_queue', 'default': True}} res = self._create_qos_queue('json', body) self.assertEqual(res.status_int, 409) def test_create_port_with_queue(self): with self.qos_queue(default=True) as q1: res = self._create_network('json', 'net1', True, arg_list=(ext_qos.QUEUE,), queue_id=q1['qos_queue']['id']) net1 = self.deserialize('json', res) self.assertEqual(net1['network'][ext_qos.QUEUE], q1['qos_queue']['id']) device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1" with self.port(device_id=device_id) as p: self.assertEqual(len(p['port'][ext_qos.QUEUE]), 36) def test_create_shared_queue_networks(self): with self.qos_queue(default=True, do_delete=False) as q1: res = self._create_network('json', 'net1', True, arg_list=(ext_qos.QUEUE,), queue_id=q1['qos_queue']['id']) net1 = 
self.deserialize('json', res) self.assertEqual(net1['network'][ext_qos.QUEUE], q1['qos_queue']['id']) res = self._create_network('json', 'net2', True, arg_list=(ext_qos.QUEUE,), queue_id=q1['qos_queue']['id']) net2 = self.deserialize('json', res) self.assertEqual(net1['network'][ext_qos.QUEUE], q1['qos_queue']['id']) device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1" res = self._create_port('json', net1['network']['id'], device_id=device_id) port1 = self.deserialize('json', res) res = self._create_port('json', net2['network']['id'], device_id=device_id) port2 = self.deserialize('json', res) self.assertEqual(port1['port'][ext_qos.QUEUE], port2['port'][ext_qos.QUEUE]) self._delete('ports', port1['port']['id']) self._delete('ports', port2['port']['id']) def test_remove_queue_in_use_fail(self): with self.qos_queue(do_delete=False) as q1: res = self._create_network('json', 'net1', True, arg_list=(ext_qos.QUEUE,), queue_id=q1['qos_queue']['id']) net1 = self.deserialize('json', res) device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1" res = self._create_port('json', net1['network']['id'], device_id=device_id) port = self.deserialize('json', res) self._delete('qos-queues', port['port'][ext_qos.QUEUE], 409) def test_update_network_new_queue(self): with self.qos_queue() as q1: res = self._create_network('json', 'net1', True, arg_list=(ext_qos.QUEUE,), queue_id=q1['qos_queue']['id']) net1 = self.deserialize('json', res) with self.qos_queue() as new_q: data = {'network': {ext_qos.QUEUE: new_q['qos_queue']['id']}} req = self.new_update_request('networks', data, net1['network']['id']) res = req.get_response(self.api) net1 = self.deserialize('json', res) self.assertEqual(net1['network'][ext_qos.QUEUE], new_q['qos_queue']['id']) def test_update_port_adding_device_id(self): with self.qos_queue(do_delete=False) as q1: res = self._create_network('json', 'net1', True, arg_list=(ext_qos.QUEUE,), queue_id=q1['qos_queue']['id']) net1 = self.deserialize('json', res) device_id = 
"00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1" res = self._create_port('json', net1['network']['id']) port = self.deserialize('json', res) self.assertIsNone(port['port'][ext_qos.QUEUE]) data = {'port': {'device_id': device_id}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) port = self.deserialize('json', res) self.assertEqual(len(port['port'][ext_qos.QUEUE]), 36) def test_get_port_with_qos_not_admin(self): body = {'qos_queue': {'tenant_id': 'not_admin', 'name': 'foo', 'min': 20, 'max': 20}} res = self._create_qos_queue('json', body, tenant_id='not_admin') q1 = self.deserialize('json', res) res = self._create_network('json', 'net1', True, arg_list=(ext_qos.QUEUE, 'tenant_id',), queue_id=q1['qos_queue']['id'], tenant_id="not_admin") net1 = self.deserialize('json', res) self.assertEqual(len(net1['network'][ext_qos.QUEUE]), 36) res = self._create_port('json', net1['network']['id'], tenant_id='not_admin', set_context=True) port = self.deserialize('json', res) self.assertNotIn(ext_qos.QUEUE, port['port']) def test_dscp_value_out_of_range(self): body = {'qos_queue': {'tenant_id': 'admin', 'dscp': '64', 'name': 'foo', 'min': 20, 'max': 20}} res = self._create_qos_queue('json', body) self.assertEqual(res.status_int, 400) def test_dscp_value_with_qos_marking_trusted_returns_400(self): body = {'qos_queue': {'tenant_id': 'admin', 'dscp': '1', 'qos_marking': 'trusted', 'name': 'foo', 'min': 20, 'max': 20}} res = self._create_qos_queue('json', body) self.assertEqual(res.status_int, 400) def test_non_admin_cannot_create_queue(self): body = {'qos_queue': {'tenant_id': 'not_admin', 'name': 'foo', 'min': 20, 'max': 20}} res = self._create_qos_queue('json', body, tenant_id='not_admin', set_context=True) self.assertEqual(res.status_int, 403) def test_update_port_non_admin_does_not_show_queue_id(self): body = {'qos_queue': {'tenant_id': 'not_admin', 'name': 'foo', 'min': 20, 'max': 20}} res = self._create_qos_queue('json', body, 
tenant_id='not_admin') q1 = self.deserialize('json', res) res = self._create_network('json', 'net1', True, arg_list=(ext_qos.QUEUE,), tenant_id='not_admin', queue_id=q1['qos_queue']['id']) net1 = self.deserialize('json', res) res = self._create_port('json', net1['network']['id'], tenant_id='not_admin', set_context=True) port = self.deserialize('json', res) device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1" data = {'port': {'device_id': device_id}} neutron_context = context.Context('', 'not_admin') port = self._update('ports', port['port']['id'], data, neutron_context=neutron_context) self.assertNotIn(ext_qos.QUEUE, port['port']) def _test_rxtx_factor(self, max_value, rxtx_factor): with self.qos_queue(max=max_value) as q1: res = self._create_network('json', 'net1', True, arg_list=(ext_qos.QUEUE,), queue_id=q1['qos_queue']['id']) net1 = self.deserialize('json', res) res = self._create_port('json', net1['network']['id'], arg_list=(ext_qos.RXTX_FACTOR,), rxtx_factor=rxtx_factor, device_id='1') port = self.deserialize('json', res) req = self.new_show_request('qos-queues', port['port'][ext_qos.QUEUE]) res = req.get_response(self.ext_api) queue = self.deserialize('json', res) self.assertEqual(queue['qos_queue']['max'], max_value * rxtx_factor) def test_rxtx_factor(self): self._test_rxtx_factor(10, 2) def test_decimal_rxtx_factor(self): self._test_rxtx_factor(10, 1.5) def test_decimal_rxtx_factor_below_1(self): self._test_rxtx_factor(10, 0.5) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_tvd/0000775000175100017510000000000013244524600021725 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_tvd/__init__.py0000666000175100017510000000000013244523345024033 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_tvd/test_plugin.py0000666000175100017510000004124413244523345024650 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from oslo_utils import uuidutils from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from vmware_nsx.tests.unit.dvs import test_plugin as dvs_tests from vmware_nsx.tests.unit.nsx_v import test_plugin as v_tests from vmware_nsx.tests.unit.nsx_v3 import test_plugin as t_tests PLUGIN_NAME = 'vmware_nsx.plugin.NsxTVDPlugin' _uuid = uuidutils.generate_uuid class NsxTVDPluginTestCase(v_tests.NsxVPluginV2TestCase, t_tests.NsxV3PluginTestCaseMixin, dvs_tests.NeutronSimpleDvsTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): # set the default plugin if self.plugin_type: cfg.CONF.set_override('default_plugin', self.plugin_type, group="nsx_tvd") # set the default availability zones cfg.CONF.set_override('nsx_v_default_availability_zones', ['default'], group="nsx_tvd") cfg.CONF.set_override('nsx_v3_default_availability_zones', ['defaultv3'], group="nsx_tvd") super(NsxTVDPluginTestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr) self._project_id = _uuid() self.core_plugin = directory.get_plugin() # create a context with this tenant self.context = context.get_admin_context() self.context.tenant_id = self.project_id # create a default user for this plugin self.core_plugin.create_project_plugin_map(self.context, {'project_plugin_map': {'plugin': self.plugin_type, 'project': self.project_id}}) self.sub_plugin 
= self.core_plugin.get_plugin_by_type(self.plugin_type) @property def project_id(self): return self._project_id @property def plugin_type(self): pass def _test_plugin_initialized(self): self.assertTrue(self.core_plugin.is_tvd_plugin()) self.assertIsNotNone(self.sub_plugin) def _test_call_create(self, obj_name, calls_count=1, project_id=None, is_bulk=False): method_name = single_name = 'create_%s' % obj_name if is_bulk: method_name = method_name + '_bulk' func_to_call = getattr(self.core_plugin, method_name) if not project_id: project_id = self.project_id with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.sub_plugin, single_name) as single_func: if is_bulk: func_to_call(self.context, {obj_name + 's': [{obj_name: {'tenant_id': project_id}}]}) else: func_to_call(self.context, {obj_name: {'tenant_id': project_id}}) self.assertEqual(calls_count, sub_func.call_count or single_func.call_count) def _test_call_create_with_net_id(self, obj_name, field_name='network_id', calls_count=1, is_bulk=False): method_name = 'create_%s' % obj_name if is_bulk: method_name = method_name + '_bulk' func_to_call = getattr(self.core_plugin, method_name) net_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}): if is_bulk: func_to_call(self.context, {obj_name + 's': [{obj_name: {'tenant_id': self.project_id, field_name: net_id}}]}) else: func_to_call(self.context, {obj_name: {'tenant_id': self.project_id, field_name: net_id}}) self.assertEqual(calls_count, sub_func.call_count) def _test_call_delete(self, obj_name): method_name = 'delete_%s' % obj_name func_to_call = getattr(self.core_plugin, method_name) obj_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_%s' % obj_name, return_value={'tenant_id': self.project_id}): func_to_call(self.context, obj_id) 
sub_func.assert_called_once() def _test_call_delete_with_net(self, obj_name, field_name='network_id'): method_name = 'delete_%s' % obj_name func_to_call = getattr(self.core_plugin, method_name) obj_id = _uuid() net_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_%s' % obj_name, return_value={field_name: net_id}),\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}): func_to_call(self.context, obj_id) sub_func.assert_called_once() def _test_call_update(self, obj_name): method_name = 'update_%s' % obj_name func_to_call = getattr(self.core_plugin, method_name) obj_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_%s' % obj_name, return_value={'tenant_id': self.project_id}): func_to_call(self.context, obj_id, {obj_name: {}}) sub_func.assert_called_once() def _test_call_update_with_net(self, obj_name, field_name='network_id'): method_name = 'update_%s' % obj_name func_to_call = getattr(self.core_plugin, method_name) obj_id = _uuid() net_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_%s' % obj_name, return_value={field_name: net_id}),\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}): func_to_call(self.context, obj_id, {obj_name: {}}) sub_func.assert_called_once() def _test_call_get(self, obj_name): method_name = 'get_%s' % obj_name func_to_call = getattr(self.core_plugin, method_name) obj_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_%s' % obj_name, return_value={'tenant_id': self.project_id}): func_to_call(self.context, obj_id) sub_func.assert_called_once() def _test_call_get_with_net(self, obj_name, field_name='network_id'): method_name = 'get_%s' % obj_name func_to_call = 
getattr(self.core_plugin, method_name) obj_id = _uuid() net_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_%s' % obj_name, return_value={field_name: net_id}),\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}): func_to_call(self.context, obj_id) sub_func.assert_called_once() class TestPluginWithDefaultPlugin(NsxTVDPluginTestCase): """Test TVD plugin with the NSX-T (default) sub plugin""" @property def plugin_type(self): return 'nsx-t' def test_plugin_initialized(self): self._test_plugin_initialized() # no unsupported extensions for the nsx_t plugin self.assertItemsEqual( ['router_type', 'router_size'], self.core_plugin._unsupported_fields[self.plugin_type]['router']) self.assertEqual( [], self.core_plugin._unsupported_fields[self.plugin_type]['port']) def test_create_network(self): self._test_call_create('network') def test_create_subnet(self): self._test_call_create_with_net_id('subnet') def test_create_port(self): self._test_call_create_with_net_id('port') def test_create_router(self): self._test_call_create('router') def test_create_floatingip(self): self._test_call_create_with_net_id( 'floatingip', field_name='floating_network_id') def test_create_security_group(self): # plugin will be called twice because of the default sg self._test_call_create('security_group', calls_count=2) def test_create_security_group_rule(self): self._test_call_create('security_group_rule') def test_create_network_bulk(self): self._test_call_create('network', is_bulk=True) def test_create_subnet_bulk(self): self._test_call_create_with_net_id('subnet', is_bulk=True) def test_create_security_group_rule_bulk(self): self._test_call_create('security_group_rule', is_bulk=True) def test_delete_network(self): self._test_call_delete('network') def test_delete_subnet(self): self._test_call_delete_with_net('subnet') def test_delete_port(self): 
self._test_call_delete_with_net('port') def test_delete_router(self): self._test_call_delete('router') def test_delete_floatingip(self): self._test_call_delete_with_net( 'floatingip', field_name='floating_network_id') def test_delete_security_group(self): self._test_call_delete('security_group') def test_update_network(self): self._test_call_update('network') def test_update_subnet(self): self._test_call_update_with_net('subnet') def test_update_port(self): self._test_call_update_with_net('port') def test_update_router(self): self._test_call_update('router') def test_update_floatingip(self): self._test_call_update_with_net( 'floatingip', field_name='floating_network_id') def test_update_security_group(self): self._test_call_update('security_group') def test_unsupported_extensions(self): self.assertRaises(n_exc.InvalidInput, self.core_plugin.create_router, self.context, {'router': {'tenant_id': self.project_id, 'router_type': 'exclusive'}}) def test_get_network(self): self._test_call_get('network') def test_get_subnet(self): self._test_call_get_with_net('subnet') def test_get_port(self): self._test_call_get_with_net('port') def test_get_router(self): self._test_call_get('router') def test_get_floatingip(self): self._test_call_get_with_net( 'floatingip', field_name='floating_network_id') def test_get_security_group(self): self._test_call_get('security_group') def test_add_router_interface(self): rtr_id = _uuid() port_id = _uuid() net_id = _uuid() with mock.patch.object(self.sub_plugin, 'add_router_interface') as sub_func,\ mock.patch.object(self.core_plugin, '_get_router', return_value={'tenant_id': self.project_id}),\ mock.patch.object(self.core_plugin, '_get_port', return_value={'network_id': net_id}),\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}),\ mock.patch.object(self.core_plugin, '_validate_interface_info', return_value=(True, False)): self.core_plugin.add_router_interface(self.context, rtr_id, {'port_id': 
port_id}) sub_func.assert_called_once() def test_add_invalid_router_interface(self): # Test that the plugin prevents adding interface from one plugin # to a router of another plugin rtr_id = _uuid() port_id = _uuid() net_id = _uuid() another_tenant_id = _uuid() another_plugin = 'nsx-v' if self.plugin_type == 'nsx-t' else 'nsx-t' self.core_plugin.create_project_plugin_map(self.context, {'project_plugin_map': {'plugin': another_plugin, 'project': another_tenant_id}}) with mock.patch.object(self.core_plugin, '_get_router', return_value={'tenant_id': self.project_id}),\ mock.patch.object(self.core_plugin, '_get_port', return_value={'network_id': net_id}),\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': another_tenant_id}),\ mock.patch.object(self.core_plugin, '_validate_interface_info', return_value=(True, False)): self.assertRaises(n_exc.InvalidInput, self.core_plugin.add_router_interface, self.context, rtr_id, {'port_id': port_id}) def test_remove_router_interface(self): rtr_id = _uuid() with mock.patch.object(self.sub_plugin, 'remove_router_interface') as sub_func,\ mock.patch.object(self.core_plugin, '_get_router', return_value={'tenant_id': self.project_id}): self.core_plugin.remove_router_interface(self.context, rtr_id, {}) sub_func.assert_called_once() def test_disassociate_floatingips(self): port_id = _uuid() net_id = _uuid() with mock.patch.object(self.sub_plugin, 'disassociate_floatingips') as sub_func,\ mock.patch.object(self.core_plugin, '_get_port', return_value={'network_id': net_id}),\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}): self.core_plugin.disassociate_floatingips(self.context, port_id) sub_func.assert_called_once() def test_new_user(self): project_id = _uuid() self._test_call_create('network', project_id=project_id) class TestPluginWithNsxv(TestPluginWithDefaultPlugin): """Test TVD plugin with the NSX-V sub plugin""" @property def plugin_type(self): return 
'nsx-v' def test_plugin_initialized(self): self._test_plugin_initialized() # no unsupported extensions for the nsx_v plugin self.assertEqual( [], self.core_plugin._unsupported_fields[self.plugin_type]['router']) self.assertEqual( [], self.core_plugin._unsupported_fields[self.plugin_type]['port']) def test_unsupported_extensions(self): self.skipTest('No unsupported extensions in this plugin') class TestPluginWithDvs(TestPluginWithDefaultPlugin): """Test TVD plugin with the DVS sub plugin""" @property def plugin_type(self): return 'dvs' def test_plugin_initialized(self): self._test_plugin_initialized() # no unsupported extensions for the dvs plugin self.assertItemsEqual( ['mac_learning_enabled', 'provider_security_groups'], self.core_plugin._unsupported_fields[self.plugin_type]['port']) def test_unsupported_extensions(self): net_id = _uuid() with mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}): self.assertRaises(n_exc.InvalidInput, self.core_plugin.create_port, self.context, {'port': {'tenant_id': self.project_id, 'network_id': net_id, 'mac_learning_enabled': True}}) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/0000775000175100017510000000000013244524600021534 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/test_opts.py0000666000175100017510000002723213244523345024147 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import mock from neutron.tests import base from oslo_config import cfg from oslo_utils import uuidutils import six from vmware_nsx.api_client import client from vmware_nsx.api_client import version from vmware_nsx.common import config # noqa from vmware_nsx.common import exceptions from vmware_nsx.common import sync from vmware_nsx import nsx_cluster from vmware_nsx.nsxlib.mh import lsn as lsnlib from vmware_nsx import plugin as mh_plugin from vmware_nsx.tests import unit as vmware BASE_CONF_PATH = vmware.get_fake_conf('neutron.conf.test') NSX_INI_PATH = vmware.get_fake_conf('nsx.ini.basic.test') NSX_INI_FULL_PATH = vmware.get_fake_conf('nsx.ini.full.test') NSX_INI_AGENTLESS_PATH = vmware.get_fake_conf('nsx.ini.agentless.test') NSX_INI_COMBINED_PATH = vmware.get_fake_conf('nsx.ini.combined.test') NVP_INI_DEPR_PATH = vmware.get_fake_conf('nvp.ini.full.test') class NSXClusterTest(base.BaseTestCase): cluster_opts = {'default_tz_uuid': uuidutils.generate_uuid(), 'default_l2_gw_service_uuid': uuidutils.generate_uuid(), 'default_l2_gw_service_uuid': uuidutils.generate_uuid(), 'nsx_user': 'foo', 'nsx_password': 'bar', 'http_timeout': 25, 'retries': 7, 'redirects': 23, 'nsx_default_interface_name': 'baz', 'nsx_controllers': ['1.1.1.1:443']} def test_create_cluster(self): cluster = nsx_cluster.NSXCluster(**self.cluster_opts) for (k, v) in six.iteritems(self.cluster_opts): self.assertEqual(v, getattr(cluster, k)) def test_create_cluster_default_port(self): opts = self.cluster_opts.copy() opts['nsx_controllers'] = ['1.1.1.1'] cluster = nsx_cluster.NSXCluster(**opts) for (k, v) in six.iteritems(self.cluster_opts): self.assertEqual(v, getattr(cluster, k)) def test_create_cluster_missing_required_attribute_raises(self): opts = self.cluster_opts.copy() opts.pop('default_tz_uuid') self.assertRaises(exceptions.InvalidClusterConfiguration, nsx_cluster.NSXCluster, **opts) class ConfigurationTest(base.BaseTestCase): def setUp(self): super(ConfigurationTest, self).setUp() # Avoid 
runs of the synchronizer looping call patch_sync = mock.patch.object(sync, '_start_loopingcall') patch_sync.start() def _assert_required_options(self, cluster): self.assertEqual(cluster.nsx_controllers, ['fake_1:443', 'fake_2:443']) self.assertEqual(cluster.default_tz_uuid, 'fake_tz_uuid') self.assertEqual(cluster.nsx_user, 'foo') self.assertEqual(cluster.nsx_password, 'bar') def _assert_extra_options(self, cluster): self.assertEqual(13, cluster.http_timeout) self.assertEqual(12, cluster.redirects) self.assertEqual(11, cluster.retries) self.assertEqual('whatever', cluster.default_l2_gw_service_uuid) self.assertEqual('whatever', cluster.default_l3_gw_service_uuid) self.assertEqual('whatever', cluster.nsx_default_interface_name) def _get_mh_plugin(self): with mock.patch("neutron.common.rpc.create_connection"): plugin = mh_plugin.NsxPlugin() return plugin def test_load_plugin_with_full_options(self): self.config_parse(args=['--config-file', BASE_CONF_PATH, '--config-file', NSX_INI_FULL_PATH]) cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) plugin = self._get_mh_plugin() cluster = plugin.cluster self._assert_required_options(cluster) self._assert_extra_options(cluster) def test_load_plugin_with_required_options_only(self): self.config_parse(args=['--config-file', BASE_CONF_PATH, '--config-file', NSX_INI_PATH]) cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) plugin = self._get_mh_plugin() self._assert_required_options(plugin.cluster) def test_defaults(self): self.assertEqual(5000, cfg.CONF.NSX.max_lp_per_bridged_ls) self.assertEqual(256, cfg.CONF.NSX.max_lp_per_overlay_ls) self.assertEqual(10, cfg.CONF.NSX.concurrent_connections) self.assertEqual('access_network', cfg.CONF.NSX.metadata_mode) self.assertEqual('stt', cfg.CONF.NSX.default_transport_type) self.assertEqual('service', cfg.CONF.NSX.replication_mode) self.assertIsNone(cfg.CONF.default_tz_uuid) self.assertEqual('admin', cfg.CONF.nsx_user) self.assertEqual('admin', cfg.CONF.nsx_password) 
self.assertEqual(75, cfg.CONF.http_timeout) self.assertEqual(2, cfg.CONF.retries) self.assertEqual(2, cfg.CONF.redirects) self.assertEqual([], cfg.CONF.nsx_controllers) self.assertIsNone(cfg.CONF.default_l3_gw_service_uuid) self.assertIsNone(cfg.CONF.default_l2_gw_service_uuid) self.assertEqual('breth0', cfg.CONF.nsx_default_interface_name) self.assertEqual(900, cfg.CONF.conn_idle_timeout) def test_load_api_extensions(self): self.config_parse(args=['--config-file', BASE_CONF_PATH, '--config-file', NSX_INI_FULL_PATH]) cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) # Load the configuration, and initialize the plugin self._get_mh_plugin() self.assertIn('extensions', cfg.CONF.api_extensions_path) def test_agentless_extensions(self): self.config_parse(args=['--config-file', BASE_CONF_PATH, '--config-file', NSX_INI_AGENTLESS_PATH]) cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) self.assertEqual(config.AgentModes.AGENTLESS, cfg.CONF.NSX.agent_mode) # The version returned from NSX matter as it has be exactly 4.1 with mock.patch.object(client.NsxApiClient, 'get_version', return_value=version.Version("4.1")): with mock.patch.object(lsnlib, 'service_cluster_exists', return_value=True): plugin = self._get_mh_plugin() self.assertNotIn('agent', plugin.supported_extension_aliases) self.assertNotIn('dhcp_agent_scheduler', plugin.supported_extension_aliases) self.assertNotIn('lsn', plugin.supported_extension_aliases) def test_agentless_extensions_version_fail(self): self.config_parse(args=['--config-file', BASE_CONF_PATH, '--config-file', NSX_INI_AGENTLESS_PATH]) cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) self.assertEqual(config.AgentModes.AGENTLESS, cfg.CONF.NSX.agent_mode) with mock.patch.object(client.NsxApiClient, 'get_version', return_value=version.Version("3.2")): try: self._get_mh_plugin() except exceptions.NsxPluginException: # This is the correct result pass else: self.fail('Expected NsxPluginException exception') def 
test_agentless_extensions_unmet_deps_fail(self): self.config_parse(args=['--config-file', BASE_CONF_PATH, '--config-file', NSX_INI_AGENTLESS_PATH]) cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) self.assertEqual(config.AgentModes.AGENTLESS, cfg.CONF.NSX.agent_mode) with mock.patch.object(client.NsxApiClient, 'get_version', return_value=version.Version("3.2")): with mock.patch.object(lsnlib, 'service_cluster_exists', return_value=False): try: self._get_mh_plugin() except exceptions.NsxPluginException: # This is the correct result pass else: self.fail('Expected NsxPluginException exception') def test_agent_extensions(self): self.config_parse(args=['--config-file', BASE_CONF_PATH, '--config-file', NSX_INI_FULL_PATH]) cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) self.assertEqual(config.AgentModes.AGENT, cfg.CONF.NSX.agent_mode) plugin = self._get_mh_plugin() self.assertIn('agent', plugin.supported_extension_aliases) self.assertIn('dhcp_agent_scheduler', plugin.supported_extension_aliases) def test_combined_extensions(self): self.config_parse(args=['--config-file', BASE_CONF_PATH, '--config-file', NSX_INI_COMBINED_PATH]) cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) self.assertEqual(config.AgentModes.COMBINED, cfg.CONF.NSX.agent_mode) with mock.patch.object(client.NsxApiClient, 'get_version', return_value=version.Version("4.1")): with mock.patch.object(lsnlib, 'service_cluster_exists', return_value=True): plugin = self._get_mh_plugin() self.assertIn('agent', plugin.supported_extension_aliases) self.assertIn('dhcp_agent_scheduler', plugin.supported_extension_aliases) self.assertIn('lsn', plugin.supported_extension_aliases) class OldNVPConfigurationTest(base.BaseTestCase): def setUp(self): super(OldNVPConfigurationTest, self).setUp() # Avoid runs of the synchronizer looping call patch_sync = mock.patch.object(sync, '_start_loopingcall') patch_sync.start() def _assert_required_options(self, cluster): 
self.assertEqual(cluster.nsx_controllers, ['fake_1:443', 'fake_2:443']) self.assertEqual(cluster.nsx_user, 'foo') self.assertEqual(cluster.nsx_password, 'bar') self.assertEqual(cluster.default_tz_uuid, 'fake_tz_uuid') def test_load_plugin_with_deprecated_options(self): self.config_parse(args=['--config-file', BASE_CONF_PATH, '--config-file', NVP_INI_DEPR_PATH]) cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) with mock.patch("neutron.common.rpc.create_connection"): plugin = mh_plugin.NsxPlugin() cluster = plugin.cluster # Verify old nvp_* params have been fully parsed self._assert_required_options(cluster) self.assertEqual(3, cluster.http_timeout) self.assertEqual(2, cluster.retries) self.assertEqual(2, cluster.redirects) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/db/0000775000175100017510000000000013244524600022121 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/db/__init__.py0000666000175100017510000000000013244523345024227 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/db/test_lsn_db.py0000666000175100017510000001037413244523345025007 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from neutron.tests.unit import testlib_api from neutron_lib import context from sqlalchemy import orm from vmware_nsx.common import exceptions as p_exc from vmware_nsx.db import lsn_db from vmware_nsx.db import nsx_models class LSNTestCase(testlib_api.SqlTestCase): def setUp(self): super(LSNTestCase, self).setUp() self.ctx = context.get_admin_context() self.net_id = 'foo_network_id' self.lsn_id = 'foo_lsn_id' self.lsn_port_id = 'foo_port_id' self.subnet_id = 'foo_subnet_id' self.mac_addr = 'aa:bb:cc:dd:ee:ff' def test_lsn_add(self): lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) lsn = (self.ctx.session.query(nsx_models.Lsn). filter_by(lsn_id=self.lsn_id).one()) self.assertEqual(self.lsn_id, lsn.lsn_id) def test_lsn_remove(self): lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) lsn_db.lsn_remove(self.ctx, self.lsn_id) q = self.ctx.session.query(nsx_models.Lsn).filter_by( lsn_id=self.lsn_id) self.assertRaises(orm.exc.NoResultFound, q.one) def test_lsn_remove_for_network(self): lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) lsn_db.lsn_remove_for_network(self.ctx, self.net_id) q = self.ctx.session.query(nsx_models.Lsn).filter_by( lsn_id=self.lsn_id) self.assertRaises(orm.exc.NoResultFound, q.one) def test_lsn_get_for_network(self): result = lsn_db.lsn_get_for_network(self.ctx, self.net_id, raise_on_err=False) self.assertIsNone(result) def test_lsn_get_for_network_raise_not_found(self): self.assertRaises(p_exc.LsnNotFound, lsn_db.lsn_get_for_network, self.ctx, self.net_id) def test_lsn_port_add(self): lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id, self.subnet_id, self.mac_addr, self.lsn_id) result = (self.ctx.session.query(nsx_models.LsnPort). 
filter_by(lsn_port_id=self.lsn_port_id).one()) self.assertEqual(self.lsn_port_id, result.lsn_port_id) def test_lsn_port_get_for_mac(self): lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id, self.subnet_id, self.mac_addr, self.lsn_id) result = lsn_db.lsn_port_get_for_mac(self.ctx, self.mac_addr) self.assertEqual(self.mac_addr, result.mac_addr) def test_lsn_port_get_for_mac_raise_not_found(self): self.assertRaises(p_exc.LsnPortNotFound, lsn_db.lsn_port_get_for_mac, self.ctx, self.mac_addr) def test_lsn_port_get_for_subnet(self): lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id, self.subnet_id, self.mac_addr, self.lsn_id) result = lsn_db.lsn_port_get_for_subnet(self.ctx, self.subnet_id) self.assertEqual(self.subnet_id, result.sub_id) def test_lsn_port_get_for_subnet_raise_not_found(self): self.assertRaises(p_exc.LsnPortNotFound, lsn_db.lsn_port_get_for_subnet, self.ctx, self.mac_addr) def test_lsn_port_remove(self): lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) lsn_db.lsn_port_remove(self.ctx, self.lsn_port_id) q = (self.ctx.session.query(nsx_models.LsnPort). filter_by(lsn_port_id=self.lsn_port_id)) self.assertRaises(orm.exc.NoResultFound, q.one) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/db/test_nsx_db.py0000666000175100017510000000365013244523345025022 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from neutron.db import models_v2 from neutron.tests.unit import testlib_api from neutron_lib import context from oslo_db import exception as d_exc from vmware_nsx.db import db as nsx_db class NsxDBTestCase(testlib_api.SqlTestCase): def setUp(self): super(NsxDBTestCase, self).setUp() self.ctx = context.get_admin_context() def _setup_neutron_network_and_port(self, network_id, port_id): with self.ctx.session.begin(subtransactions=True): self.ctx.session.add(models_v2.Network(id=network_id)) port = models_v2.Port(id=port_id, network_id=network_id, mac_address='foo_mac_address', admin_state_up=True, status='ACTIVE', device_id='', device_owner='') self.ctx.session.add(port) def test_add_neutron_nsx_port_mapping_raise_integrity_constraint(self): neutron_port_id = 'foo_neutron_port_id' nsx_port_id = 'foo_nsx_port_id' nsx_switch_id = 'foo_nsx_switch_id' self.assertRaises(d_exc.DBError, nsx_db.add_neutron_nsx_port_mapping, self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/test_dhcpmeta.py0000666000175100017510000017232613244523345024754 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from neutron_lib import constants as n_consts from neutron_lib import context from neutron_lib import exceptions as n_exc from oslo_config import cfg from neutron.tests import base from neutron.tests.unit import testlib_api from vmware_nsx.api_client import exception from vmware_nsx.common import exceptions as p_exc from vmware_nsx.db import lsn_db from vmware_nsx.dhcp_meta import constants from vmware_nsx.dhcp_meta import lsnmanager as lsn_man from vmware_nsx.dhcp_meta import migration as mig_man from vmware_nsx.dhcp_meta import nsx from vmware_nsx.dhcp_meta import rpc class DhcpMetadataBuilderTestCase(base.BaseTestCase): def setUp(self): super(DhcpMetadataBuilderTestCase, self).setUp() self.builder = mig_man.DhcpMetadataBuilder(mock.Mock(), mock.Mock()) self.network_id = 'foo_network_id' self.subnet_id = 'foo_subnet_id' self.router_id = 'foo_router_id' def test_dhcp_agent_get_all(self): expected = [] self.builder.plugin.list_dhcp_agents_hosting_network.return_value = ( {'agents': expected}) agents = self.builder.dhcp_agent_get_all(mock.ANY, self.network_id) self.assertEqual(expected, agents) def test_dhcp_port_get_all(self): expected = [] self.builder.plugin.get_ports.return_value = expected ports = self.builder.dhcp_port_get_all(mock.ANY, self.network_id) self.assertEqual(expected, ports) def test_router_id_get(self): port = { 'device_id': self.router_id, 'network_id': self.network_id, 'fixed_ips': [{'subnet_id': self.subnet_id}] } subnet = { 'id': self.subnet_id, 'network_id': self.network_id } self.builder.plugin.get_ports.return_value = [port] result = self.builder.router_id_get(context, subnet) self.assertEqual(self.router_id, result) def test_router_id_get_none_subnet(self): self.assertIsNone(self.builder.router_id_get(mock.ANY, None)) def test_router_id_get_none_no_router(self): self.builder.plugin.get_ports.return_value = [] subnet = {'network_id': self.network_id} self.assertIsNone(self.builder.router_id_get(mock.ANY, subnet)) def 
test_metadata_deallocate(self): self.builder.metadata_deallocate( mock.ANY, self.router_id, self.subnet_id) self.assertTrue(self.builder.plugin.remove_router_interface.call_count) def test_metadata_allocate(self): self.builder.metadata_allocate( mock.ANY, self.router_id, self.subnet_id) self.assertTrue(self.builder.plugin.add_router_interface.call_count) def test_dhcp_deallocate(self): agents = [{'id': 'foo_agent_id'}] ports = [{'id': 'foo_port_id'}] self.builder.dhcp_deallocate(mock.ANY, self.network_id, agents, ports) self.assertTrue( self.builder.plugin.remove_network_from_dhcp_agent.call_count) self.assertTrue(self.builder.plugin.delete_port.call_count) def _test_dhcp_allocate(self, subnet, expected_notify_count): with mock.patch.object(mig_man.nsx, 'handle_network_dhcp_access') as f: self.builder.dhcp_allocate(mock.ANY, self.network_id, subnet) self.assertTrue(f.call_count) self.assertEqual(expected_notify_count, self.builder.notifier.notify.call_count) def test_dhcp_allocate(self): subnet = {'network_id': self.network_id, 'id': self.subnet_id} self._test_dhcp_allocate(subnet, 2) def test_dhcp_allocate_none_subnet(self): self._test_dhcp_allocate(None, 0) class MigrationManagerTestCase(base.BaseTestCase): def setUp(self): super(MigrationManagerTestCase, self).setUp() self.manager = mig_man.MigrationManager(mock.Mock(), mock.Mock(), mock.Mock()) self.network_id = 'foo_network_id' self.router_id = 'foo_router_id' self.subnet_id = 'foo_subnet_id' self.mock_builder_p = mock.patch.object(self.manager, 'builder') self.mock_builder = self.mock_builder_p.start() def _test_validate(self, lsn_exists=False, ext_net=False, subnets=None): network = {'router:external': ext_net} self.manager.manager.lsn_exists.return_value = lsn_exists self.manager.plugin.get_network.return_value = network self.manager.plugin.get_subnets.return_value = subnets result = self.manager.validate(mock.ANY, self.network_id) if len(subnets): self.assertEqual(subnets[0], result) else: 
self.assertIsNone(result) def test_validate_no_subnets(self): self._test_validate(subnets=[]) def test_validate_with_one_subnet(self): self._test_validate(subnets=[{'cidr': '0.0.0.0/0'}]) def test_validate_raise_conflict_many_subnets(self): self.assertRaises(p_exc.LsnMigrationConflict, self._test_validate, subnets=[{'id': 'sub1'}, {'id': 'sub2'}]) def test_validate_raise_conflict_lsn_exists(self): self.assertRaises(p_exc.LsnMigrationConflict, self._test_validate, lsn_exists=True) def test_validate_raise_badrequest_external_net(self): self.assertRaises(n_exc.BadRequest, self._test_validate, ext_net=True) def test_validate_raise_badrequest_metadata_net(self): self.assertRaises(n_exc.BadRequest, self._test_validate, ext_net=False, subnets=[{'cidr': rpc.METADATA_SUBNET_CIDR}]) def _test_migrate(self, router, subnet, expected_calls): self.mock_builder.router_id_get.return_value = router self.manager.migrate(mock.ANY, self.network_id, subnet) # testing the exact the order of calls is important self.assertEqual(expected_calls, self.mock_builder.mock_calls) def test_migrate(self): subnet = { 'id': self.subnet_id, 'network_id': self.network_id } call_sequence = [ mock.call.router_id_get(mock.ANY, subnet), mock.call.metadata_deallocate( mock.ANY, self.router_id, self.subnet_id), mock.call.dhcp_agent_get_all(mock.ANY, self.network_id), mock.call.dhcp_port_get_all(mock.ANY, self.network_id), mock.call.dhcp_deallocate( mock.ANY, self.network_id, mock.ANY, mock.ANY), mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet), mock.call.metadata_allocate( mock.ANY, self.router_id, self.subnet_id) ] self._test_migrate(self.router_id, subnet, call_sequence) def test_migrate_no_router_uplink(self): subnet = { 'id': self.subnet_id, 'network_id': self.network_id } call_sequence = [ mock.call.router_id_get(mock.ANY, subnet), mock.call.dhcp_agent_get_all(mock.ANY, self.network_id), mock.call.dhcp_port_get_all(mock.ANY, self.network_id), mock.call.dhcp_deallocate( mock.ANY, 
self.network_id, mock.ANY, mock.ANY), mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet), ] self._test_migrate(None, subnet, call_sequence) def test_migrate_no_subnet(self): call_sequence = [ mock.call.router_id_get(mock.ANY, None), mock.call.dhcp_allocate(mock.ANY, self.network_id, None), ] self._test_migrate(None, None, call_sequence) def _test_report(self, lsn_attrs, expected): self.manager.manager.lsn_port_get.return_value = lsn_attrs report = self.manager.report(mock.ANY, self.network_id, self.subnet_id) self.assertEqual(expected, report) def test_report_for_lsn(self): self._test_report(('foo_lsn_id', 'foo_lsn_port_id'), {'ports': ['foo_lsn_port_id'], 'services': ['foo_lsn_id'], 'type': 'lsn'}) def test_report_for_lsn_without_lsn_port(self): self._test_report(('foo_lsn_id', None), {'ports': [], 'services': ['foo_lsn_id'], 'type': 'lsn'}) def _test_report_for_lsn_without_subnet(self, validated_subnet): with mock.patch.object(self.manager.plugin, 'get_subnets', return_value=validated_subnet): self.manager.manager.lsn_port_get.return_value = ( ('foo_lsn_id', 'foo_lsn_port_id')) report = self.manager.report(context, self.network_id) expected = { 'ports': ['foo_lsn_port_id'] if validated_subnet else [], 'services': ['foo_lsn_id'], 'type': 'lsn' } self.assertEqual(expected, report) def test_report_for_lsn_without_subnet_subnet_found(self): self._test_report_for_lsn_without_subnet([{'id': self.subnet_id}]) def test_report_for_lsn_without_subnet_subnet_not_found(self): self.manager.manager.lsn_get.return_value = 'foo_lsn_id' self._test_report_for_lsn_without_subnet(None) def test_report_for_dhcp_agent(self): self.manager.manager.lsn_port_get.return_value = (None, None) self.mock_builder.dhcp_agent_get_all.return_value = ( [{'id': 'foo_agent_id'}]) self.mock_builder.dhcp_port_get_all.return_value = ( [{'id': 'foo_dhcp_port_id'}]) result = self.manager.report(mock.ANY, self.network_id, self.subnet_id) expected = { 'ports': ['foo_dhcp_port_id'], 'services': 
['foo_agent_id'], 'type': 'agent' } self.assertEqual(expected, result) class LsnManagerTestCase(base.BaseTestCase): def setUp(self): super(LsnManagerTestCase, self).setUp() self.net_id = 'foo_network_id' self.sub_id = 'foo_subnet_id' self.port_id = 'foo_port_id' self.lsn_id = 'foo_lsn_id' self.mac = 'aa:bb:cc:dd:ee:ff' self.switch_id = 'foo_switch_id' self.lsn_port_id = 'foo_lsn_port_id' self.tenant_id = 'foo_tenant_id' self.manager = lsn_man.LsnManager(mock.Mock()) self.context = context.get_admin_context() self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api') self.mock_lsn_api = self.mock_lsn_api_p.start() self.mock_nsx_utils_p = mock.patch.object(lsn_man, 'nsx_utils') self.mock_nsx_utils = self.mock_nsx_utils_p.start() nsx.register_dhcp_opts(cfg) nsx.register_metadata_opts(cfg) def test_lsn_get(self): self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id expected = self.manager.lsn_get(mock.ANY, self.net_id) self.mock_lsn_api.lsn_for_network_get.assert_called_once_with( mock.ANY, self.net_id) self.assertEqual(expected, self.lsn_id) def _test_lsn_get_raise_not_found_with_exc(self, exc): self.mock_lsn_api.lsn_for_network_get.side_effect = exc self.assertRaises(p_exc.LsnNotFound, self.manager.lsn_get, mock.ANY, self.net_id) self.mock_lsn_api.lsn_for_network_get.assert_called_once_with( mock.ANY, self.net_id) def test_lsn_get_raise_not_found_with_not_found(self): self._test_lsn_get_raise_not_found_with_exc(n_exc.NotFound) def test_lsn_get_raise_not_found_with_api_error(self): self._test_lsn_get_raise_not_found_with_exc(exception.NsxApiException) def _test_lsn_get_silent_raise_with_exc(self, exc): self.mock_lsn_api.lsn_for_network_get.side_effect = exc expected = self.manager.lsn_get( mock.ANY, self.net_id, raise_on_err=False) self.mock_lsn_api.lsn_for_network_get.assert_called_once_with( mock.ANY, self.net_id) self.assertIsNone(expected) def test_lsn_get_silent_raise_with_not_found(self): self._test_lsn_get_silent_raise_with_exc(n_exc.NotFound) def 
test_lsn_get_silent_raise_with_api_error(self): self._test_lsn_get_silent_raise_with_exc(exception.NsxApiException) def test_lsn_create(self): self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id self.manager.lsn_create(mock.ANY, self.net_id) self.mock_lsn_api.lsn_for_network_create.assert_called_once_with( mock.ANY, self.net_id) def test_lsn_create_raise_api_error(self): self.mock_lsn_api.lsn_for_network_create.side_effect = ( exception.NsxApiException) self.assertRaises(p_exc.NsxPluginException, self.manager.lsn_create, mock.ANY, self.net_id) self.mock_lsn_api.lsn_for_network_create.assert_called_once_with( mock.ANY, self.net_id) def test_lsn_delete(self): self.manager.lsn_delete(mock.ANY, self.lsn_id) self.mock_lsn_api.lsn_delete.assert_called_once_with( mock.ANY, self.lsn_id) def _test_lsn_delete_with_exc(self, exc): self.mock_lsn_api.lsn_delete.side_effect = exc self.manager.lsn_delete(mock.ANY, self.lsn_id) self.mock_lsn_api.lsn_delete.assert_called_once_with( mock.ANY, self.lsn_id) def test_lsn_delete_with_not_found(self): self._test_lsn_delete_with_exc(n_exc.NotFound) def test_lsn_delete_api_exception(self): self._test_lsn_delete_with_exc(exception.NsxApiException) def test_lsn_delete_by_network(self): self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id with mock.patch.object(self.manager, 'lsn_delete') as f: self.manager.lsn_delete_by_network(mock.ANY, self.net_id) self.mock_lsn_api.lsn_for_network_get.assert_called_once_with( mock.ANY, self.net_id) f.assert_called_once_with(mock.ANY, self.lsn_id) def _test_lsn_delete_by_network_with_exc(self, exc): self.mock_lsn_api.lsn_for_network_get.side_effect = exc with mock.patch.object(lsn_man.LOG, 'warning') as l: self.manager.lsn_delete_by_network(mock.ANY, self.net_id) self.assertEqual(1, l.call_count) def test_lsn_delete_by_network_with_not_found(self): self._test_lsn_delete_by_network_with_exc(n_exc.NotFound) def test_lsn_delete_by_network_with_not_api_error(self): 
self._test_lsn_delete_by_network_with_exc(exception.NsxApiException) def test_lsn_port_get(self): self.mock_lsn_api.lsn_port_by_subnet_get.return_value = ( self.lsn_port_id) with mock.patch.object( self.manager, 'lsn_get', return_value=self.lsn_id): expected = self.manager.lsn_port_get( mock.ANY, self.net_id, self.sub_id) self.assertEqual(expected, (self.lsn_id, self.lsn_port_id)) def test_lsn_port_get_lsn_not_found_on_raise(self): with mock.patch.object( self.manager, 'lsn_get', side_effect=p_exc.LsnNotFound(entity='network', entity_id=self.net_id)): self.assertRaises(p_exc.LsnNotFound, self.manager.lsn_port_get, mock.ANY, self.net_id, self.sub_id) def test_lsn_port_get_lsn_not_found_silent_raise(self): with mock.patch.object(self.manager, 'lsn_get', return_value=None): result = self.manager.lsn_port_get( mock.ANY, self.net_id, self.sub_id, raise_on_err=False) expected = (None, None) self.assertEqual(expected, result) def test_lsn_port_get_port_not_found_on_raise(self): self.mock_lsn_api.lsn_port_by_subnet_get.side_effect = n_exc.NotFound with mock.patch.object( self.manager, 'lsn_get', return_value=self.lsn_id): self.assertRaises(p_exc.LsnPortNotFound, self.manager.lsn_port_get, mock.ANY, self.net_id, self.sub_id) def test_lsn_port_get_port_not_found_silent_raise(self): self.mock_lsn_api.lsn_port_by_subnet_get.side_effect = n_exc.NotFound with mock.patch.object( self.manager, 'lsn_get', return_value=self.lsn_id): result = self.manager.lsn_port_get( mock.ANY, self.net_id, self.sub_id, raise_on_err=False) expected = (self.lsn_id, None) self.assertEqual(expected, result) def test_lsn_port_create(self): self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id expected = self.manager.lsn_port_create(mock.ANY, mock.ANY, mock.ANY) self.assertEqual(expected, self.lsn_port_id) def _test_lsn_port_create_with_exc(self, exc, expected): self.mock_lsn_api.lsn_port_create.side_effect = exc self.assertRaises(expected, self.manager.lsn_port_create, mock.ANY, mock.ANY, 
mock.ANY) def test_lsn_port_create_with_not_found(self): self._test_lsn_port_create_with_exc(n_exc.NotFound, p_exc.LsnNotFound) def test_lsn_port_create_api_exception(self): self._test_lsn_port_create_with_exc(exception.NsxApiException, p_exc.NsxPluginException) def test_lsn_port_delete(self): self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY) self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count) def _test_lsn_port_delete_with_exc(self, exc): self.mock_lsn_api.lsn_port_delete.side_effect = exc with mock.patch.object(lsn_man.LOG, 'warning') as l: self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY) self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count) self.assertEqual(1, l.call_count) def test_lsn_port_delete_with_not_found(self): self._test_lsn_port_delete_with_exc(n_exc.NotFound) def test_lsn_port_delete_api_exception(self): self._test_lsn_port_delete_with_exc(exception.NsxApiException) def _test_lsn_port_dhcp_setup(self, ret_val, sub): self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id with mock.patch.object( self.manager, 'lsn_get', return_value=self.lsn_id): with mock.patch.object(lsn_man.switch_api, 'get_port_by_neutron_tag'): expected = self.manager.lsn_port_dhcp_setup( mock.Mock(), mock.ANY, mock.ANY, mock.ANY, subnet_config=sub) self.assertEqual( 1, self.mock_lsn_api.lsn_port_create.call_count) self.assertEqual( 1, self.mock_lsn_api.lsn_port_plug_network.call_count) self.assertEqual(expected, ret_val) def test_lsn_port_dhcp_setup(self): self._test_lsn_port_dhcp_setup((self.lsn_id, self.lsn_port_id), None) def test_lsn_port_dhcp_setup_with_config(self): with mock.patch.object(self.manager, 'lsn_port_dhcp_configure') as f: self._test_lsn_port_dhcp_setup(None, mock.ANY) self.assertEqual(1, f.call_count) def test_lsn_port_dhcp_setup_with_not_found(self): self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] with 
mock.patch.object(lsn_man.switch_api, 'get_port_by_neutron_tag') as f: f.side_effect = n_exc.NotFound self.assertRaises(p_exc.PortConfigurationError, self.manager.lsn_port_dhcp_setup, mock.Mock(), mock.ANY, mock.ANY, mock.ANY) def test_lsn_port_dhcp_setup_with_conflict(self): self.mock_lsn_api.lsn_port_plug_network.side_effect = ( p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id)) self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] with mock.patch.object(lsn_man.switch_api, 'get_port_by_neutron_tag'): with mock.patch.object(self.manager, 'lsn_port_delete') as g: self.assertRaises(p_exc.PortConfigurationError, self.manager.lsn_port_dhcp_setup, mock.Mock(), mock.ANY, mock.ANY, mock.ANY) self.assertEqual(1, g.call_count) def _test_lsn_port_dhcp_configure_with_subnet( self, expected, dns=None, gw=None, routes=None): subnet = { 'enable_dhcp': True, 'dns_nameservers': dns or [], 'gateway_ip': gw, 'host_routes': routes } self.manager.lsn_port_dhcp_configure(mock.ANY, self.lsn_id, self.lsn_port_id, subnet) self.mock_lsn_api.lsn_port_dhcp_configure.assert_called_once_with( mock.ANY, self.lsn_id, self.lsn_port_id, subnet['enable_dhcp'], expected) def test_lsn_port_dhcp_configure(self): expected = { 'routers': '127.0.0.1', 'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time, 'domain_name': cfg.CONF.NSX_DHCP.domain_name } self._test_lsn_port_dhcp_configure_with_subnet( expected, dns=[], gw='127.0.0.1', routes=[]) def test_lsn_port_dhcp_configure_gatewayless(self): expected = { 'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time, 'domain_name': cfg.CONF.NSX_DHCP.domain_name } self._test_lsn_port_dhcp_configure_with_subnet(expected, gw=None) def test_lsn_port_dhcp_configure_with_extra_dns_servers(self): expected = { 'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time, 'domain_name_servers': '8.8.8.8,9.9.9.9', 'domain_name': cfg.CONF.NSX_DHCP.domain_name } self._test_lsn_port_dhcp_configure_with_subnet( expected, dns=['8.8.8.8', 
'9.9.9.9']) def test_lsn_port_dhcp_configure_with_host_routes(self): expected = { 'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time, 'domain_name': cfg.CONF.NSX_DHCP.domain_name, 'classless_static_routes': '8.8.8.8,9.9.9.9' } self._test_lsn_port_dhcp_configure_with_subnet( expected, routes=['8.8.8.8', '9.9.9.9']) def _test_lsn_metadata_configure(self, is_enabled): with mock.patch.object(self.manager, 'lsn_port_dispose') as f: self.manager.plugin.get_subnet.return_value = ( {'network_id': self.net_id}) self.manager.lsn_metadata_configure(mock.ANY, self.sub_id, is_enabled) expected = { 'metadata_server_port': 8775, 'metadata_server_ip': '127.0.0.1', 'metadata_proxy_shared_secret': '' } self.mock_lsn_api.lsn_metadata_configure.assert_called_once_with( mock.ANY, mock.ANY, is_enabled, expected) if is_enabled: self.assertEqual( 1, self.mock_lsn_api.lsn_port_by_subnet_get.call_count) else: self.assertEqual(1, f.call_count) def test_lsn_metadata_configure_enabled(self): self._test_lsn_metadata_configure(True) def test_lsn_metadata_configure_disabled(self): self._test_lsn_metadata_configure(False) def test_lsn_metadata_configure_not_found(self): self.mock_lsn_api.lsn_metadata_configure.side_effect = ( p_exc.LsnNotFound(entity='lsn', entity_id=self.lsn_id)) self.manager.plugin.get_subnet.return_value = ( {'network_id': self.net_id}) self.assertRaises(p_exc.NsxPluginException, self.manager.lsn_metadata_configure, mock.ANY, self.sub_id, True) def test_lsn_port_metadata_setup(self): subnet = { 'cidr': '0.0.0.0/0', 'id': self.sub_id, 'network_id': self.net_id, 'tenant_id': self.tenant_id } expected_data = { 'subnet_id': subnet['id'], 'ip_address': subnet['cidr'], 'mac_address': constants.METADATA_MAC } self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] with mock.patch.object(lsn_man.switch_api, 'create_lport') as f: with mock.patch.object(self.manager, 'lsn_port_create') as g: f.return_value = {'uuid': self.port_id} 
self.manager.lsn_port_metadata_setup( self.context, self.lsn_id, subnet) (self.mock_lsn_api.lsn_port_plug_network. assert_called_once_with(mock.ANY, self.lsn_id, mock.ANY, self.port_id)) g.assert_called_once_with( self.context, self.lsn_id, expected_data) def test_lsn_port_metadata_setup_raise_not_found(self): subnet = { 'cidr': '0.0.0.0/0', 'id': self.sub_id, 'network_id': self.net_id, 'tenant_id': self.tenant_id } self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] with mock.patch.object(lsn_man.switch_api, 'create_lport') as f: f.side_effect = n_exc.NotFound self.assertRaises(p_exc.PortConfigurationError, self.manager.lsn_port_metadata_setup, mock.Mock(), self.lsn_id, subnet) def test_lsn_port_metadata_setup_raise_conflict(self): subnet = { 'cidr': '0.0.0.0/0', 'id': self.sub_id, 'network_id': self.net_id, 'tenant_id': self.tenant_id } self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] with mock.patch.object(lsn_man.switch_api, 'create_lport') as f: with mock.patch.object(lsn_man.switch_api, 'delete_port') as g: f.return_value = {'uuid': self.port_id} self.mock_lsn_api.lsn_port_plug_network.side_effect = ( p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id)) self.assertRaises(p_exc.PortConfigurationError, self.manager.lsn_port_metadata_setup, mock.Mock(), self.lsn_id, subnet) self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count) self.assertEqual(1, g.call_count) def _test_lsn_port_dispose_with_values(self, lsn_id, lsn_port_id, count): with mock.patch.object(self.manager, 'lsn_port_get_by_mac', return_value=(lsn_id, lsn_port_id)): self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac) self.assertEqual(count, self.mock_lsn_api.lsn_port_delete.call_count) def test_lsn_port_dispose(self): self._test_lsn_port_dispose_with_values( self.lsn_id, self.lsn_port_id, 1) def test_lsn_port_dispose_meta_mac(self): self.mac = constants.METADATA_MAC with mock.patch.object(lsn_man.switch_api, 'get_port_by_neutron_tag') as 
f: with mock.patch.object(lsn_man.switch_api, 'delete_port') as g: f.return_value = {'uuid': self.port_id} self._test_lsn_port_dispose_with_values( self.lsn_id, self.lsn_port_id, 1) f.assert_called_once_with( mock.ANY, self.net_id, constants.METADATA_PORT_ID) g.assert_called_once_with(mock.ANY, self.net_id, self.port_id) def test_lsn_port_dispose_lsn_not_found(self): self._test_lsn_port_dispose_with_values(None, None, 0) def test_lsn_port_dispose_lsn_port_not_found(self): self._test_lsn_port_dispose_with_values(self.lsn_id, None, 0) def test_lsn_port_dispose_api_error(self): self.mock_lsn_api.lsn_port_delete.side_effect = ( exception.NsxApiException) with mock.patch.object(lsn_man.LOG, 'warning') as l: self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac) self.assertEqual(1, l.call_count) def test_lsn_port_host_conf(self): with mock.patch.object(self.manager, 'lsn_port_get', return_value=(self.lsn_id, self.lsn_port_id)): f = mock.Mock() self.manager._lsn_port_host_conf(mock.ANY, self.net_id, self.sub_id, mock.ANY, f) self.assertEqual(1, f.call_count) def test_lsn_port_host_conf_lsn_port_not_found(self): with mock.patch.object( self.manager, 'lsn_port_get', return_value=(None, None)) as f: self.manager._lsn_port_host_conf( mock.ANY, self.net_id, self.sub_id, mock.ANY, mock.Mock()) self.assertEqual(1, f.call_count) def _test_lsn_port_update(self, dhcp=None, meta=None): self.manager.lsn_port_update( mock.ANY, self.net_id, self.sub_id, dhcp, meta) count = 1 if dhcp else 0 count = count + 1 if meta else count self.assertEqual(count, (self.mock_lsn_api. 
lsn_port_host_entries_update.call_count)) def test_lsn_port_update(self): self._test_lsn_port_update() def test_lsn_port_update_dhcp_meta(self): self._test_lsn_port_update(mock.ANY, mock.ANY) def test_lsn_port_update_dhcp_and_nometa(self): self._test_lsn_port_update(mock.ANY, None) def test_lsn_port_update_nodhcp_and_nmeta(self): self._test_lsn_port_update(None, mock.ANY) def test_lsn_port_update_raise_error(self): self.mock_lsn_api.lsn_port_host_entries_update.side_effect = ( exception.NsxApiException) self.assertRaises(p_exc.PortConfigurationError, self.manager.lsn_port_update, mock.ANY, mock.ANY, mock.ANY, mock.ANY) class PersistentLsnManagerTestCase(testlib_api.SqlTestCase): def setUp(self): super(PersistentLsnManagerTestCase, self).setUp() self.net_id = 'foo_network_id' self.sub_id = 'foo_subnet_id' self.port_id = 'foo_port_id' self.lsn_id = 'foo_lsn_id' self.mac = 'aa:bb:cc:dd:ee:ff' self.lsn_port_id = 'foo_lsn_port_id' self.tenant_id = 'foo_tenant_id' nsx.register_dhcp_opts(cfg) nsx.register_metadata_opts(cfg) lsn_man.register_lsn_opts(cfg) self.manager = lsn_man.PersistentLsnManager(mock.Mock()) self.context = context.get_admin_context() self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api') self.mock_lsn_api = self.mock_lsn_api_p.start() def test_lsn_get(self): lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) result = self.manager.lsn_get(self.context, self.net_id) self.assertEqual(self.lsn_id, result) def test_lsn_get_raise_not_found(self): self.assertRaises(p_exc.LsnNotFound, self.manager.lsn_get, self.context, self.net_id) def test_lsn_get_silent_not_found(self): result = self.manager.lsn_get( self.context, self.net_id, raise_on_err=False) self.assertIsNone(result) def test_lsn_get_sync_on_missing(self): cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN') self.manager = lsn_man.PersistentLsnManager(mock.Mock()) with mock.patch.object(self.manager, 'lsn_save') as f: self.manager.lsn_get(self.context, self.net_id, raise_on_err=True) 
self.assertTrue(self.mock_lsn_api.lsn_for_network_get.call_count) self.assertTrue(f.call_count) def test_lsn_save(self): self.manager.lsn_save(self.context, self.net_id, self.lsn_id) result = self.manager.lsn_get(self.context, self.net_id) self.assertEqual(self.lsn_id, result) def test_lsn_create(self): self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id with mock.patch.object(self.manager, 'lsn_save') as f: result = self.manager.lsn_create(self.context, self.net_id) self.assertTrue( self.mock_lsn_api.lsn_for_network_create.call_count) self.assertTrue(f.call_count) self.assertEqual(self.lsn_id, result) def test_lsn_create_failure(self): with mock.patch.object( self.manager, 'lsn_save', side_effect=p_exc.NsxPluginException(err_msg='')): self.assertRaises(p_exc.NsxPluginException, self.manager.lsn_create, self.context, self.net_id) self.assertTrue(self.mock_lsn_api.lsn_delete.call_count) def test_lsn_delete(self): self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id self.manager.lsn_create(self.context, self.net_id) self.manager.lsn_delete(self.context, self.lsn_id) self.assertIsNone(self.manager.lsn_get( self.context, self.net_id, raise_on_err=False)) def test_lsn_delete_not_existent(self): self.manager.lsn_delete(self.context, self.lsn_id) self.assertTrue(self.mock_lsn_api.lsn_delete.call_count) def test_lsn_port_get(self): lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id, self.sub_id, self.mac, self.lsn_id) res = self.manager.lsn_port_get(self.context, self.net_id, self.sub_id) self.assertEqual((self.lsn_id, self.lsn_port_id), res) def test_lsn_port_get_raise_not_found(self): self.assertRaises(p_exc.LsnPortNotFound, self.manager.lsn_port_get, self.context, self.net_id, self.sub_id) def test_lsn_port_get_silent_not_found(self): result = self.manager.lsn_port_get( self.context, self.net_id, self.sub_id, raise_on_err=False) expected = (None, None) 
self.assertEqual(expected, result) def test_lsn_port_get_sync_on_missing(self): return cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN') self.manager = lsn_man.PersistentLsnManager(mock.Mock()) self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id self.mock_lsn_api.lsn_port_by_subnet_get.return_value = ( self.lsn_id, self.lsn_port_id) with mock.patch.object(self.manager, 'lsn_save') as f: with mock.patch.object(self.manager, 'lsn_port_save') as g: self.manager.lsn_port_get( self.context, self.net_id, self.sub_id) self.assertTrue( self.mock_lsn_api.lsn_port_by_subnet_get.call_count) self.assertTrue( self.mock_lsn_api.lsn_port_info_get.call_count) self.assertTrue(f.call_count) self.assertTrue(g.call_count) def test_lsn_port_get_by_mac(self): lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id, self.sub_id, self.mac, self.lsn_id) res = self.manager.lsn_port_get_by_mac( self.context, self.net_id, self.mac) self.assertEqual((self.lsn_id, self.lsn_port_id), res) def test_lsn_port_get_by_mac_raise_not_found(self): self.assertRaises(p_exc.LsnPortNotFound, self.manager.lsn_port_get_by_mac, self.context, self.net_id, self.sub_id) def test_lsn_port_get_by_mac_silent_not_found(self): result = self.manager.lsn_port_get_by_mac( self.context, self.net_id, self.sub_id, raise_on_err=False) expected = (None, None) self.assertEqual(expected, result) def test_lsn_port_create(self): lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id subnet = {'subnet_id': self.sub_id, 'mac_address': self.mac} with mock.patch.object(self.manager, 'lsn_port_save') as f: result = self.manager.lsn_port_create( self.context, self.net_id, subnet) self.assertTrue( self.mock_lsn_api.lsn_port_create.call_count) self.assertTrue(f.call_count) self.assertEqual(self.lsn_port_id, result) def test_lsn_port_create_failure(self): subnet = {'subnet_id': self.sub_id, 
'mac_address': self.mac} with mock.patch.object( self.manager, 'lsn_port_save', side_effect=p_exc.NsxPluginException(err_msg='')): self.assertRaises(p_exc.NsxPluginException, self.manager.lsn_port_create, self.context, self.net_id, subnet) self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count) def test_lsn_port_delete(self): lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id, self.sub_id, self.mac, self.lsn_id) self.manager.lsn_port_delete( self.context, self.lsn_id, self.lsn_port_id) expected = (None, None) self.assertEqual(expected, self.manager.lsn_port_get( self.context, self.lsn_id, self.sub_id, raise_on_err=False)) def test_lsn_port_delete_not_existent(self): self.manager.lsn_port_delete( self.context, self.lsn_id, self.lsn_port_id) self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count) def test_lsn_port_save(self): self.manager.lsn_save(self.context, self.net_id, self.lsn_id) self.manager.lsn_port_save(self.context, self.lsn_port_id, self.sub_id, self.mac, self.lsn_id) result = self.manager.lsn_port_get( self.context, self.net_id, self.sub_id, raise_on_err=False) self.assertEqual((self.lsn_id, self.lsn_port_id), result) class DhcpAgentNotifyAPITestCase(base.BaseTestCase): def setUp(self): super(DhcpAgentNotifyAPITestCase, self).setUp() self.notifier = nsx.DhcpAgentNotifyAPI(mock.Mock(), mock.Mock()) self.plugin = self.notifier.plugin self.lsn_manager = self.notifier.lsn_manager def _test_notify_port_update( self, ports, expected_count, expected_args=None): port = { 'id': 'foo_port_id', 'network_id': 'foo_network_id', 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}] } self.notifier.plugin.get_ports.return_value = ports self.notifier.notify(mock.ANY, {'port': port}, 'port.update.end') self.lsn_manager.lsn_port_update.assert_has_calls(expected_args) def test_notify_ports_update_no_ports(self): self._test_notify_port_update(None, 0, []) self._test_notify_port_update([], 0, []) def 
test_notify_ports_update_one_port(self): ports = [{ 'fixed_ips': [{'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}], 'device_id': 'foo_device_id', 'device_owner': 'foo_device_owner', 'mac_address': 'fa:16:3e:da:1d:46' }] call_args = mock.call( mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[{'ip_address': '1.2.3.4', 'mac_address': 'fa:16:3e:da:1d:46'}], meta=[{'instance_id': 'foo_device_id', 'ip_address': '1.2.3.4'}]) self._test_notify_port_update(ports, 1, [call_args]) def test_notify_ports_update_ports_with_empty_device_id(self): ports = [{ 'fixed_ips': [{'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}], 'device_id': '', 'device_owner': 'foo_device_owner', 'mac_address': 'fa:16:3e:da:1d:46' }] call_args = mock.call( mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[{'ip_address': '1.2.3.4', 'mac_address': 'fa:16:3e:da:1d:46'}], meta=[] ) self._test_notify_port_update(ports, 1, [call_args]) def test_notify_ports_update_ports_with_no_fixed_ips(self): ports = [{ 'fixed_ips': [], 'device_id': 'foo_device_id', 'device_owner': 'foo_device_owner', 'mac_address': 'fa:16:3e:da:1d:46' }] call_args = mock.call( mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[]) self._test_notify_port_update(ports, 1, [call_args]) def test_notify_ports_update_ports_with_no_fixed_ips_and_no_device(self): ports = [{ 'fixed_ips': [], 'device_id': '', 'device_owner': 'foo_device_owner', 'mac_address': 'fa:16:3e:da:1d:46' }] call_args = mock.call( mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[]) self._test_notify_port_update(ports, 0, [call_args]) def test_notify_ports_update_with_special_ports(self): ports = [{'fixed_ips': [], 'device_id': '', 'device_owner': n_consts.DEVICE_OWNER_DHCP, 'mac_address': 'fa:16:3e:da:1d:46'}, {'fixed_ips': [{'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}], 'device_id': 'foo_device_id', 'device_owner': n_consts.DEVICE_OWNER_ROUTER_GW, 'mac_address': 'fa:16:3e:da:1d:46'}] call_args = mock.call( mock.ANY, 
'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[]) self._test_notify_port_update(ports, 0, [call_args]) def test_notify_ports_update_many_ports(self): ports = [{'fixed_ips': [], 'device_id': '', 'device_owner': 'foo_device_owner', 'mac_address': 'fa:16:3e:da:1d:46'}, {'fixed_ips': [{'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}], 'device_id': 'foo_device_id', 'device_owner': 'foo_device_owner', 'mac_address': 'fa:16:3e:da:1d:46'}] call_args = mock.call( mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[{'ip_address': '1.2.3.4', 'mac_address': 'fa:16:3e:da:1d:46'}], meta=[{'instance_id': 'foo_device_id', 'ip_address': '1.2.3.4'}]) self._test_notify_port_update(ports, 1, [call_args]) def _test_notify_subnet_action(self, action): with mock.patch.object(self.notifier, '_subnet_%s' % action) as f: self.notifier._handle_subnet_dhcp_access[action] = f subnet = {'subnet': mock.ANY} self.notifier.notify( mock.ANY, subnet, 'subnet.%s.end' % action) f.assert_called_once_with(mock.ANY, subnet) def test_notify_subnet_create(self): self._test_notify_subnet_action('create') def test_notify_subnet_update(self): self._test_notify_subnet_action('update') def test_notify_subnet_delete(self): self._test_notify_subnet_action('delete') def _test_subnet_create(self, enable_dhcp, exc=None, exc_obj=None, call_notify=True): subnet = { 'id': 'foo_subnet_id', 'enable_dhcp': enable_dhcp, 'network_id': 'foo_network_id', 'tenant_id': 'foo_tenant_id', 'cidr': '0.0.0.0/0' } if exc: self.plugin.create_port.side_effect = exc_obj or exc self.assertRaises(exc, self.notifier.notify, mock.ANY, {'subnet': subnet}, 'subnet.create.end') self.plugin.delete_subnet.assert_called_with( mock.ANY, subnet['id']) else: if call_notify: self.notifier.notify( mock.ANY, {'subnet': subnet}, 'subnet.create.end') if enable_dhcp: dhcp_port = { 'name': '', 'admin_state_up': True, 'network_id': 'foo_network_id', 'tenant_id': 'foo_tenant_id', 'device_owner': n_consts.DEVICE_OWNER_DHCP, 'mac_address': mock.ANY, 
'fixed_ips': [{'subnet_id': 'foo_subnet_id'}], 'device_id': '' } self.plugin.create_port.assert_called_once_with( mock.ANY, {'port': dhcp_port}) else: self.assertEqual(0, self.plugin.create_port.call_count) def test_subnet_create_enabled_dhcp(self): self._test_subnet_create(True) def test_subnet_create_disabled_dhcp(self): self._test_subnet_create(False) def test_subnet_create_raise_port_config_error(self): with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2, 'delete_port') as d: self._test_subnet_create( True, exc=n_exc.Conflict, exc_obj=p_exc.PortConfigurationError(lsn_id='foo_lsn_id', net_id='foo_net_id', port_id='foo_port_id')) d.assert_called_once_with(self.plugin, mock.ANY, 'foo_port_id') def test_subnet_update(self): subnet = { 'id': 'foo_subnet_id', 'network_id': 'foo_network_id', } self.lsn_manager.lsn_port_get.return_value = ('foo_lsn_id', 'foo_lsn_port_id') self.notifier.notify( mock.ANY, {'subnet': subnet}, 'subnet.update.end') self.lsn_manager.lsn_port_dhcp_configure.assert_called_once_with( mock.ANY, 'foo_lsn_id', 'foo_lsn_port_id', subnet) def test_subnet_update_raise_lsn_not_found(self): subnet = { 'id': 'foo_subnet_id', 'network_id': 'foo_network_id', } self.lsn_manager.lsn_port_get.side_effect = ( p_exc.LsnNotFound(entity='network', entity_id=subnet['network_id'])) self.assertRaises(p_exc.LsnNotFound, self.notifier.notify, mock.ANY, {'subnet': subnet}, 'subnet.update.end') def _test_subnet_update_lsn_port_not_found(self, dhcp_port): subnet = { 'id': 'foo_subnet_id', 'enable_dhcp': True, 'network_id': 'foo_network_id', 'tenant_id': 'foo_tenant_id' } self.lsn_manager.lsn_port_get.side_effect = ( p_exc.LsnPortNotFound(lsn_id='foo_lsn_id', entity='subnet', entity_id=subnet['id'])) self.notifier.plugin.get_ports.return_value = dhcp_port count = 0 if dhcp_port is None else 1 with mock.patch.object(nsx, 'handle_port_dhcp_access') as h: self.notifier.notify( mock.ANY, {'subnet': subnet}, 'subnet.update.end') self.assertEqual(count, 
h.call_count) if not dhcp_port: self._test_subnet_create(enable_dhcp=True, exc=None, call_notify=False) def test_subnet_update_lsn_port_not_found_without_dhcp_port(self): self._test_subnet_update_lsn_port_not_found(None) def test_subnet_update_lsn_port_not_found_with_dhcp_port(self): self._test_subnet_update_lsn_port_not_found([mock.ANY]) def _test_subnet_delete(self, ports=None): subnet = { 'id': 'foo_subnet_id', 'network_id': 'foo_network_id', 'cidr': '0.0.0.0/0' } self.plugin.get_ports.return_value = ports self.notifier.notify(mock.ANY, {'subnet': subnet}, 'subnet.delete.end') filters = { 'network_id': [subnet['network_id']], 'device_owner': [n_consts.DEVICE_OWNER_DHCP] } self.plugin.get_ports.assert_called_once_with( mock.ANY, filters=filters) if ports: self.plugin.delete_port.assert_called_once_with( mock.ANY, ports[0]['id']) else: self.assertEqual(0, self.plugin.delete_port.call_count) def test_subnet_delete_enabled_dhcp_no_ports(self): self._test_subnet_delete() def test_subnet_delete_enabled_dhcp_with_dhcp_port(self): self._test_subnet_delete([{'id': 'foo_port_id'}]) class DhcpTestCase(base.BaseTestCase): def setUp(self): super(DhcpTestCase, self).setUp() self.plugin = mock.Mock() self.plugin.lsn_manager = mock.Mock() def test_handle_create_network(self): network = {'id': 'foo_network_id'} nsx.handle_network_dhcp_access( self.plugin, mock.ANY, network, 'create_network') self.plugin.lsn_manager.lsn_create.assert_called_once_with( mock.ANY, network['id']) def test_handle_create_network_router_external(self): network = {'id': 'foo_network_id', 'router:external': True} nsx.handle_network_dhcp_access( self.plugin, mock.ANY, network, 'create_network') self.assertFalse(self.plugin.lsn_manager.lsn_create.call_count) def test_handle_delete_network(self): network_id = 'foo_network_id' self.plugin.lsn_manager.lsn_delete_by_network.return_value = ( 'foo_lsn_id') nsx.handle_network_dhcp_access( self.plugin, mock.ANY, network_id, 'delete_network') 
self.plugin.lsn_manager.lsn_delete_by_network.assert_called_once_with( mock.ANY, 'foo_network_id') def _test_handle_create_dhcp_owner_port(self, exc=None): subnet = { 'cidr': '0.0.0.0/0', 'id': 'foo_subnet_id' } port = { 'id': 'foo_port_id', 'device_owner': n_consts.DEVICE_OWNER_DHCP, 'mac_address': 'aa:bb:cc:dd:ee:ff', 'network_id': 'foo_network_id', 'fixed_ips': [{'subnet_id': subnet['id']}] } expected_data = { 'subnet_id': subnet['id'], 'ip_address': subnet['cidr'], 'mac_address': port['mac_address'] } self.plugin.get_subnet.return_value = subnet if exc is None: nsx.handle_port_dhcp_access( self.plugin, mock.ANY, port, 'create_port') (self.plugin.lsn_manager.lsn_port_dhcp_setup. assert_called_once_with(mock.ANY, port['network_id'], port['id'], expected_data, subnet)) else: self.plugin.lsn_manager.lsn_port_dhcp_setup.side_effect = exc self.assertRaises(n_exc.NeutronException, nsx.handle_port_dhcp_access, self.plugin, mock.ANY, port, 'create_port') def test_handle_create_dhcp_owner_port(self): self._test_handle_create_dhcp_owner_port() def test_handle_create_dhcp_owner_port_raise_port_config_error(self): config_error = p_exc.PortConfigurationError(lsn_id='foo_lsn_id', net_id='foo_net_id', port_id='foo_port_id') self._test_handle_create_dhcp_owner_port(exc=config_error) def test_handle_delete_dhcp_owner_port(self): port = { 'id': 'foo_port_id', 'device_owner': n_consts.DEVICE_OWNER_DHCP, 'network_id': 'foo_network_id', 'fixed_ips': [], 'mac_address': 'aa:bb:cc:dd:ee:ff' } nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, 'delete_port') self.plugin.lsn_manager.lsn_port_dispose.assert_called_once_with( mock.ANY, port['network_id'], port['mac_address']) def _test_handle_user_port(self, action, handler): port = { 'id': 'foo_port_id', 'device_owner': 'foo_device_owner', 'network_id': 'foo_network_id', 'mac_address': 'aa:bb:cc:dd:ee:ff', 'fixed_ips': [{'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}] } expected_data = { 'ip_address': '1.2.3.4', 
'mac_address': 'aa:bb:cc:dd:ee:ff' } self.plugin.get_subnet.return_value = {'enable_dhcp': True} nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action) handler.assert_called_once_with( mock.ANY, port['network_id'], 'foo_subnet_id', expected_data) def test_handle_create_user_port(self): self._test_handle_user_port( 'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add) def test_handle_delete_user_port(self): self._test_handle_user_port( 'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove) def _test_handle_user_port_disabled_dhcp(self, action, handler): port = { 'id': 'foo_port_id', 'device_owner': 'foo_device_owner', 'network_id': 'foo_network_id', 'mac_address': 'aa:bb:cc:dd:ee:ff', 'fixed_ips': [{'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}] } self.plugin.get_subnet.return_value = {'enable_dhcp': False} nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action) self.assertEqual(0, handler.call_count) def test_handle_create_user_port_disabled_dhcp(self): self._test_handle_user_port_disabled_dhcp( 'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add) def test_handle_delete_user_port_disabled_dhcp(self): self._test_handle_user_port_disabled_dhcp( 'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove) def _test_handle_user_port_no_fixed_ips(self, action, handler): port = { 'id': 'foo_port_id', 'device_owner': 'foo_device_owner', 'network_id': 'foo_network_id', 'fixed_ips': [] } nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action) self.assertEqual(0, handler.call_count) def test_handle_create_user_port_no_fixed_ips(self): self._test_handle_user_port_no_fixed_ips( 'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add) def test_handle_delete_user_port_no_fixed_ips(self): self._test_handle_user_port_no_fixed_ips( 'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove) class MetadataTestCase(base.BaseTestCase): def setUp(self): super(MetadataTestCase, self).setUp() self.plugin 
= mock.Mock() self.plugin.lsn_manager = mock.Mock() def _test_handle_port_metadata_access_special_owners( self, owner, dev_id='foo_device_id', ips=None): port = { 'id': 'foo_port_id', 'device_owner': owner, 'device_id': dev_id, 'fixed_ips': ips or [] } nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY) self.assertFalse( self.plugin.lsn_manager.lsn_port_meta_host_add.call_count) self.assertFalse( self.plugin.lsn_manager.lsn_port_meta_host_remove.call_count) def test_handle_port_metadata_access_external_network(self): port = { 'id': 'foo_port_id', 'device_owner': 'foo_device_owner', 'device_id': 'foo_device_id', 'network_id': 'foo_network_id', 'fixed_ips': [{'subnet_id': 'foo_subnet'}] } self.plugin.get_network.return_value = {'router:external': True} nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY) self.assertFalse( self.plugin.lsn_manager.lsn_port_meta_host_add.call_count) self.assertFalse( self.plugin.lsn_manager.lsn_port_meta_host_remove.call_count) def test_handle_port_metadata_access_dhcp_port(self): self._test_handle_port_metadata_access_special_owners( n_consts.DEVICE_OWNER_DHCP, [{'subnet_id': 'foo_subnet'}]) def test_handle_port_metadata_access_router_port(self): self._test_handle_port_metadata_access_special_owners( n_consts.DEVICE_OWNER_ROUTER_INTF, [{'subnet_id': 'foo_subnet'}]) def test_handle_port_metadata_access_no_device_id(self): self._test_handle_port_metadata_access_special_owners( n_consts.DEVICE_OWNER_DHCP, '') def test_handle_port_metadata_access_no_fixed_ips(self): self._test_handle_port_metadata_access_special_owners( 'foo', 'foo', None) def _test_handle_port_metadata_access(self, is_delete, raise_exc=False): port = { 'id': 'foo_port_id', 'device_owner': 'foo_device_id', 'network_id': 'foo_network_id', 'device_id': 'foo_device_id', 'tenant_id': 'foo_tenant_id', 'fixed_ips': [ {'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'} ] } meta = { 'instance_id': port['device_id'], 'tenant_id': 
port['tenant_id'], 'ip_address': port['fixed_ips'][0]['ip_address'] } self.plugin.get_network.return_value = {'router:external': False} if is_delete: mock_func = self.plugin.lsn_manager.lsn_port_meta_host_remove else: mock_func = self.plugin.lsn_manager.lsn_port_meta_host_add if raise_exc: mock_func.side_effect = p_exc.PortConfigurationError( lsn_id='foo_lsn_id', net_id='foo_net_id', port_id=None) with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2, 'delete_port') as d: self.assertRaises(p_exc.PortConfigurationError, nsx.handle_port_metadata_access, self.plugin, mock.ANY, port, is_delete=is_delete) if not is_delete: d.assert_called_once_with(mock.ANY, mock.ANY, port['id']) else: self.assertFalse(d.call_count) else: nsx.handle_port_metadata_access( self.plugin, mock.ANY, port, is_delete=is_delete) mock_func.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, meta) def test_handle_port_metadata_access_on_delete_true(self): self._test_handle_port_metadata_access(True) def test_handle_port_metadata_access_on_delete_false(self): self._test_handle_port_metadata_access(False) def test_handle_port_metadata_access_on_delete_true_raise(self): self._test_handle_port_metadata_access(True, raise_exc=True) def test_handle_port_metadata_access_on_delete_false_raise(self): self._test_handle_port_metadata_access(False, raise_exc=True) def _test_handle_router_metadata_access( self, is_port_found, raise_exc=False): subnet = { 'id': 'foo_subnet_id', 'network_id': 'foo_network_id' } interface = { 'subnet_id': subnet['id'], 'port_id': 'foo_port_id' } mock_func = self.plugin.lsn_manager.lsn_metadata_configure if not is_port_found: self.plugin.get_port.side_effect = n_exc.NotFound if raise_exc: with mock.patch.object(nsx.l3_db.L3_NAT_db_mixin, 'remove_router_interface') as d: mock_func.side_effect = p_exc.NsxPluginException(err_msg='') self.assertRaises(p_exc.NsxPluginException, nsx.handle_router_metadata_access, self.plugin, mock.ANY, 'foo_router_id', interface) 
d.assert_called_once_with(mock.ANY, mock.ANY, 'foo_router_id', interface) else: nsx.handle_router_metadata_access( self.plugin, mock.ANY, 'foo_router_id', interface) mock_func.assert_called_once_with( mock.ANY, subnet['id'], is_port_found) def test_handle_router_metadata_access_add_interface(self): self._test_handle_router_metadata_access(True) def test_handle_router_metadata_access_delete_interface(self): self._test_handle_router_metadata_access(False) def test_handle_router_metadata_access_raise_error_on_add(self): self._test_handle_router_metadata_access(True, raise_exc=True) def test_handle_router_metadata_access_raise_error_on_delete(self): self._test_handle_router_metadata_access(True, raise_exc=False) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/test_utils.py0000666000175100017510000004344313244523345024324 0ustar zuulzuul00000000000000# Copyright (c) 2013 VMware. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from neutron.db import api as db_api from neutron.tests import base from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import provider_net as pnet from oslo_utils import uuidutils from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import nsx_utils from vmware_nsx.common import utils from vmware_nsx.db import nsx_models from vmware_nsx.nsxlib import mh as nsxlib from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsxlib.mh import base as nsx_base class NsxUtilsTestCase(base.BaseTestCase): def _mock_port_mapping_db_calls(self, ret_value): # Mock relevant db calls # This will allow for avoiding setting up the plugin # for creating db entries mock.patch(vmware.nsx_method('get_nsx_switch_and_port_id', module_name='db.db'), return_value=ret_value).start() mock.patch(vmware.nsx_method('add_neutron_nsx_port_mapping', module_name='db.db')).start() mock.patch(vmware.nsx_method('delete_neutron_nsx_port_mapping', module_name='db.db')).start() def _mock_network_mapping_db_calls(self, ret_value): # Mock relevant db calls # This will allow for avoiding setting up the plugin # for creating db entries mock.patch(vmware.nsx_method('get_nsx_switch_ids', module_name='db.db'), return_value=ret_value).start() mock.patch(vmware.nsx_method('add_neutron_nsx_network_mapping', module_name='db.db')).start() def _mock_router_mapping_db_calls(self, ret_value): # Mock relevant db calls # This will allow for avoiding setting up the plugin # for creating db entries mock.patch(vmware.nsx_method('get_nsx_router_id', module_name='db.db'), return_value=ret_value).start() mock.patch(vmware.nsx_method('add_neutron_nsx_router_mapping', module_name='db.db')).start() def _verify_get_nsx_switch_and_port_id(self, exp_ls_uuid, exp_lp_uuid): # The nsxlib and db calls are mocked, therefore the cluster # and the neutron_port_id parameters can be set 
to None ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id( db_api.get_reader_session(), None, None) self.assertEqual(exp_ls_uuid, ls_uuid) self.assertEqual(exp_lp_uuid, lp_uuid) def _verify_get_nsx_switch_ids(self, exp_ls_uuids): # The nsxlib and db calls are mocked, therefore the cluster # and the neutron_router_id parameters can be set to None ls_uuids = nsx_utils.get_nsx_switch_ids( db_api.get_reader_session(), None, None) for ls_uuid in ls_uuids or []: self.assertIn(ls_uuid, exp_ls_uuids) exp_ls_uuids.remove(ls_uuid) self.assertFalse(exp_ls_uuids) def _verify_get_nsx_router_id(self, exp_lr_uuid): neutron_router_id = uuidutils.generate_uuid() lr_uuid = nsx_utils.get_nsx_router_id(db_api.get_reader_session(), None, neutron_router_id) self.assertEqual(exp_lr_uuid, lr_uuid) def test_get_nsx_switch_and_port_id_from_db_mappings(self): # This test is representative of the 'standard' case in which both the # switch and the port mappings were stored in the neutron db exp_ls_uuid = uuidutils.generate_uuid() exp_lp_uuid = uuidutils.generate_uuid() ret_value = exp_ls_uuid, exp_lp_uuid self._mock_port_mapping_db_calls(ret_value) self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid) def test_get_nsx_switch_and_port_id_only_port_db_mapping(self): # This test is representative of the case in which a port with a nsx # db mapping in the havana db was upgraded to icehouse exp_ls_uuid = uuidutils.generate_uuid() exp_lp_uuid = uuidutils.generate_uuid() ret_value = None, exp_lp_uuid self._mock_port_mapping_db_calls(ret_value) with mock.patch(vmware.nsx_method('query_lswitch_lports', module_name='nsxlib.mh.switch'), return_value=[{'uuid': exp_lp_uuid, '_relations': { 'LogicalSwitchConfig': { 'uuid': exp_ls_uuid} }}]): self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid) def test_get_nsx_switch_and_port_id_no_db_mapping(self): # This test is representative of the case where db mappings where not # found for a given port identifier exp_ls_uuid = 
uuidutils.generate_uuid() exp_lp_uuid = uuidutils.generate_uuid() ret_value = None, None self._mock_port_mapping_db_calls(ret_value) with mock.patch(vmware.nsx_method('query_lswitch_lports', module_name='nsxlib.mh.switch'), return_value=[{'uuid': exp_lp_uuid, '_relations': { 'LogicalSwitchConfig': { 'uuid': exp_ls_uuid} }}]): self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid) def test_get_nsx_switch_and_port_id_no_mappings_returns_none(self): # This test verifies that the function return (None, None) if the # mappings are not found both in the db and the backend ret_value = None, None self._mock_port_mapping_db_calls(ret_value) with mock.patch(vmware.nsx_method('query_lswitch_lports', module_name='nsxlib.mh.switch'), return_value=[]): self._verify_get_nsx_switch_and_port_id(None, None) def test_get_nsx_switch_ids_from_db_mappings(self): # This test is representative of the 'standard' case in which the # lswitch mappings were stored in the neutron db exp_ls_uuids = [uuidutils.generate_uuid()] self._mock_network_mapping_db_calls(exp_ls_uuids) self._verify_get_nsx_switch_ids(exp_ls_uuids) def test_get_nsx_switch_ids_no_db_mapping(self): # This test is representative of the case where db mappings where not # found for a given network identifier exp_ls_uuids = [uuidutils.generate_uuid()] self._mock_network_mapping_db_calls(None) with mock.patch(vmware.nsx_method('get_lswitches', module_name='nsxlib.mh.switch'), return_value=[{'uuid': uuid} for uuid in exp_ls_uuids]): self._verify_get_nsx_switch_ids(exp_ls_uuids) def test_get_nsx_switch_ids_no_mapping_returns_None(self): # This test verifies that the function returns None if the mappings # are not found both in the db and in the backend self._mock_network_mapping_db_calls(None) with mock.patch(vmware.nsx_method('get_lswitches', module_name='nsxlib.mh.switch'), return_value=[]): self._verify_get_nsx_switch_ids(None) def test_get_nsx_router_id_from_db_mappings(self): # This test is representative of the 
'standard' case in which the # router mapping was stored in the neutron db exp_lr_uuid = uuidutils.generate_uuid() self._mock_router_mapping_db_calls(exp_lr_uuid) self._verify_get_nsx_router_id(exp_lr_uuid) def test_get_nsx_router_id_no_db_mapping(self): # This test is representative of the case where db mappings where not # found for a given port identifier exp_lr_uuid = uuidutils.generate_uuid() self._mock_router_mapping_db_calls(None) with mock.patch(vmware.nsx_method('query_lrouters', module_name='nsxlib.mh.router'), return_value=[{'uuid': exp_lr_uuid}]): self._verify_get_nsx_router_id(exp_lr_uuid) def test_get_nsx_router_id_no_mapping_returns_None(self): # This test verifies that the function returns None if the mapping # are not found both in the db and in the backend self._mock_router_mapping_db_calls(None) with mock.patch(vmware.nsx_method('query_lrouters', module_name='nsxlib.mh.router'), return_value=[]): self._verify_get_nsx_router_id(None) def test_check_and_truncate_name_with_none(self): name = None result = utils.check_and_truncate(name) self.assertEqual('', result) def test_check_and_truncate_name_with_short_name(self): name = 'foo_port_name' result = utils.check_and_truncate(name) self.assertEqual(name, result) def test_check_and_truncate_name_long_name(self): name = 'this_is_a_port_whose_name_is_longer_than_40_chars' result = utils.check_and_truncate(name) self.assertEqual(len(result), utils.MAX_DISPLAY_NAME_LEN) def test_build_uri_path_plain(self): result = nsxlib._build_uri_path('RESOURCE') self.assertEqual("%s/%s" % (nsxlib.URI_PREFIX, 'RESOURCE'), result) def test_build_uri_path_with_field(self): result = nsxlib._build_uri_path('RESOURCE', fields='uuid') expected = "%s/%s?fields=uuid" % (nsxlib.URI_PREFIX, 'RESOURCE') self.assertEqual(expected, result) def test_build_uri_path_with_filters(self): filters = {"tag": 'foo', "tag_scope": "scope_foo"} result = nsxlib._build_uri_path('RESOURCE', filters=filters) expected = ( 
"%s/%s?tag=foo&tag_scope=scope_foo" % (nsxlib.URI_PREFIX, 'RESOURCE')) self.assertEqual(expected, result) def test_build_uri_path_with_resource_id(self): res = 'RESOURCE' res_id = 'resource_id' result = nsxlib._build_uri_path(res, resource_id=res_id) expected = "%s/%s/%s" % (nsxlib.URI_PREFIX, res, res_id) self.assertEqual(expected, result) def test_build_uri_path_with_parent_and_resource_id(self): parent_res = 'RESOURCE_PARENT' child_res = 'RESOURCE_CHILD' res = '%s/%s' % (child_res, parent_res) par_id = 'parent_resource_id' res_id = 'resource_id' result = nsxlib._build_uri_path( res, parent_resource_id=par_id, resource_id=res_id) expected = ("%s/%s/%s/%s/%s" % (nsxlib.URI_PREFIX, parent_res, par_id, child_res, res_id)) self.assertEqual(expected, result) def test_build_uri_path_with_attachment(self): parent_res = 'RESOURCE_PARENT' child_res = 'RESOURCE_CHILD' res = '%s/%s' % (child_res, parent_res) par_id = 'parent_resource_id' res_id = 'resource_id' result = nsxlib._build_uri_path(res, parent_resource_id=par_id, resource_id=res_id, is_attachment=True) expected = ("%s/%s/%s/%s/%s/%s" % (nsxlib.URI_PREFIX, parent_res, par_id, child_res, res_id, 'attachment')) self.assertEqual(expected, result) def test_build_uri_path_with_extra_action(self): parent_res = 'RESOURCE_PARENT' child_res = 'RESOURCE_CHILD' res = '%s/%s' % (child_res, parent_res) par_id = 'parent_resource_id' res_id = 'resource_id' result = nsxlib._build_uri_path(res, parent_resource_id=par_id, resource_id=res_id, extra_action='doh') expected = ("%s/%s/%s/%s/%s/%s" % (nsxlib.URI_PREFIX, parent_res, par_id, child_res, res_id, 'doh')) self.assertEqual(expected, result) def _mock_sec_group_mapping_db_calls(self, ret_value): mock.patch(vmware.nsx_method('get_nsx_security_group_id', module_name='db.db'), return_value=ret_value).start() mock.patch(vmware.nsx_method('add_neutron_nsx_security_group_mapping', module_name='db.db')).start() def _verify_get_nsx_sec_profile_id(self, exp_sec_prof_uuid): # The nsxlib 
and db calls are mocked, therefore the cluster # and the neutron_id parameters can be set to None sec_prof_uuid = nsx_utils.get_nsx_security_group_id( db_api.get_reader_session(), None, None) self.assertEqual(exp_sec_prof_uuid, sec_prof_uuid) def test_get_nsx_sec_profile_id_from_db_mappings(self): # This test is representative of the 'standard' case in which the # security group mapping was stored in the neutron db exp_sec_prof_uuid = uuidutils.generate_uuid() self._mock_sec_group_mapping_db_calls(exp_sec_prof_uuid) self._verify_get_nsx_sec_profile_id(exp_sec_prof_uuid) def test_get_nsx_sec_profile_id_no_db_mapping(self): # This test is representative of the case where db mappings where not # found for a given security profile identifier exp_sec_prof_uuid = uuidutils.generate_uuid() self._mock_sec_group_mapping_db_calls(None) with mock.patch(vmware.nsx_method('query_security_profiles', module_name='nsxlib.mh.secgroup'), return_value=[{'uuid': exp_sec_prof_uuid}]): self._verify_get_nsx_sec_profile_id(exp_sec_prof_uuid) def test_get_nsx_sec_profile_id_no_mapping_returns_None(self): # This test verifies that the function returns None if the mapping # are not found both in the db and in the backend self._mock_sec_group_mapping_db_calls(None) with mock.patch(vmware.nsx_method('query_security_profiles', module_name='nsxlib.mh.secgroup'), return_value=[]): self._verify_get_nsx_sec_profile_id(None) def test_convert_to_nsx_transport_zones_no_multiprovider(self): test_net = {'id': 'whatever'} results = nsx_utils.convert_to_nsx_transport_zones( 'meh_zone_uuid', test_net, default_transport_type='meh_transport_type') self.assertEqual(1, len(results)) result = results[0] self.assertEqual('meh_zone_uuid', result['zone_uuid']) self.assertEqual('meh_transport_type', result['transport_type']) def _verify_nsx_transport_zones(self, results): self.assertEqual(2, len(results)) result_1 = results[0] self.assertEqual(utils.NetworkTypes.BRIDGE, result_1['transport_type']) 
self.assertEqual([{'transport': 66}], result_1['binding_config']['vlan_translation']) self.assertEqual('whatever_tz_1', result_1['zone_uuid']) result_2 = results[1] self.assertEqual(utils.NetworkTypes.STT, result_2['transport_type']) self.assertNotIn('binding_config', result_2) self.assertEqual('whatever_tz_2', result_2['zone_uuid']) def test_convert_to_nsx_transport_zones_with_bindings(self): binding_1 = nsx_models.TzNetworkBinding( 'whatever', utils.NetworkTypes.VLAN, 'whatever_tz_1', 66) binding_2 = nsx_models.TzNetworkBinding( 'whatever', utils.NetworkTypes.STT, 'whatever_tz_2', None) results = nsx_utils.convert_to_nsx_transport_zones( 'meh_zone_uuid', None, bindings=[binding_1, binding_2]) self._verify_nsx_transport_zones(results) def test_convert_to_nsx_transport_zones_with_multiprovider(self): segments = [ {pnet.NETWORK_TYPE: utils.NetworkTypes.VLAN, pnet.PHYSICAL_NETWORK: 'whatever_tz_1', pnet.SEGMENTATION_ID: 66}, {pnet.NETWORK_TYPE: utils.NetworkTypes.STT, pnet.PHYSICAL_NETWORK: 'whatever_tz_2'}, ] results = nsx_utils.convert_to_nsx_transport_zones( 'meh_zone_uuid', {'id': 'whatever_net', mpnet_apidef.SEGMENTS: segments}) self._verify_nsx_transport_zones(results) class ClusterManagementTestCase(nsx_base.NsxlibTestCase): def test_cluster_in_readonly_mode(self): with mock.patch.object(self.fake_cluster.api_client, 'request', side_effect=api_exc.ReadOnlyMode): self.assertRaises(nsx_exc.MaintenanceInProgress, nsxlib.do_request, cluster=self.fake_cluster) def test_cluster_method_not_implemented(self): self.assertRaises(api_exc.NsxApiException, nsxlib.do_request, nsxlib.HTTP_GET, nsxlib._build_uri_path('MY_FAKE_RESOURCE', resource_id='foo'), cluster=self.fake_cluster) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/__init__.py0000666000175100017510000000000013244523345023642 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/test_plugin.py0000666000175100017510000014474613244523345024472 0ustar zuulzuul00000000000000# Copyright (c) 2012 
OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import uuid import mock from neutron.extensions import l3 from neutron.extensions import securitygroup as secgrp from neutron.tests.unit import _test_extension_portbindings as test_bindings import neutron.tests.unit.db.test_db_base_plugin_v2 as test_plugin from neutron.tests.unit.extensions import test_extra_dhcp_opt as test_dhcpopts import neutron.tests.unit.extensions.test_l3 as test_l3_plugin import neutron.tests.unit.extensions.test_l3_ext_gw_mode as test_ext_gw_mode import neutron.tests.unit.extensions.test_securitygroup as ext_sg from neutron.tests.unit import testlib_api from neutron_lib.api.definitions import dvr as dvr_apidef from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import l3_ext_gw_mode as l3_egm_apidef from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as pnet from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as ntn_exc from neutron_lib.plugins import directory from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log from oslo_utils import uuidutils from sqlalchemy import exc as sql_exc import webob.exc from vmware_nsx.api_client import exception as api_exc from vmware_nsx.api_client import version as ver_module from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common 
import sync from vmware_nsx.common import utils from vmware_nsx.db import db as nsx_db from vmware_nsx.nsxlib import mh as nsxlib from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.extensions import test_metadata from vmware_nsx.tests.unit.nsx_mh.apiclient import fake from vmware_nsx.tests.unit import test_utils LOG = log.getLogger(__name__) class NsxPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): def _create_network(self, fmt, name, admin_state_up, arg_list=None, providernet_args=None, set_context=False, tenant_id=None, **kwargs): tenant_id = tenant_id or self._tenant_id data = {'network': {'name': name, 'admin_state_up': admin_state_up, 'tenant_id': tenant_id}} # Fix to allow the router:external attribute and any other # attributes containing a colon to be passed with # a double underscore instead kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items()) if extnet_apidef.EXTERNAL in kwargs: arg_list = (extnet_apidef.EXTERNAL, ) + (arg_list or ()) attrs = kwargs if providernet_args: attrs.update(providernet_args) for arg in (('admin_state_up', 'tenant_id', 'shared') + (arg_list or ())): # Arg must be present if arg in kwargs: data['network'][arg] = kwargs[arg] network_req = self.new_create_request('networks', data, fmt) if set_context and tenant_id: # create a specific auth context for this request network_req.environ['neutron.context'] = context.Context( '', tenant_id) return network_req.get_response(self.api) def setUp(self, plugin=vmware.PLUGIN_NAME, ext_mgr=None, service_plugins=None): test_utils.override_nsx_ini_test() # mock api client self.fc = fake.FakeClient(vmware.STUBS_PATH) self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True) self.mock_instance = self.mock_nsx.start() # Avoid runs of the synchronizer looping call patch_sync = mock.patch.object(sync, '_start_loopingcall') patch_sync.start() # Emulate tests against NSX 2.x self.mock_instance.return_value.get_version.return_value = ( 
ver_module.Version("2.9")) self.mock_instance.return_value.request.side_effect = ( self.fc.fake_request) super(NsxPluginV2TestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) # Newly created port's status is always 'DOWN' till NSX wires them. self.port_create_status = constants.PORT_STATUS_DOWN cfg.CONF.set_override('metadata_mode', None, 'NSX') self.addCleanup(self.fc.reset_all) class TestBasicGet(test_plugin.TestBasicGet, NsxPluginV2TestCase): pass class TestV2HTTPResponse(test_plugin.TestV2HTTPResponse, NsxPluginV2TestCase): pass class TestPortsV2(NsxPluginV2TestCase, test_plugin.TestPortsV2, test_bindings.PortBindingsTestCase, test_bindings.PortBindingsHostTestCaseMixin, test_bindings.PortBindingsVnicTestCaseMixin): VIF_TYPE = portbindings.VIF_TYPE_OVS HAS_PORT_FILTER = True def _test_exhaust_ports(self, providernet_args=None): with self.network(name='testnet', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)) as net: with self.subnet(network=net) as sub: with self.port(subnet=sub): # creating another port should see an exception self._create_port('json', net['network']['id'], 400) def test_exhaust_ports_overlay_network(self): cfg.CONF.set_override('max_lp_per_overlay_ls', 1, group='NSX') self._test_exhaust_ports() def test_exhaust_ports_bridged_network(self): cfg.CONF.set_override('max_lp_per_bridged_ls', 1, group="NSX") providernet_args = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'tzuuid'} self._test_exhaust_ports(providernet_args=providernet_args) def test_update_port_delete_ip(self): # This test case overrides the default because the nsx plugin # implements port_security/security groups and it is not allowed # to remove an ip address from a port unless the security group # is first removed. 
with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [], secgrp.SECURITYGROUPS: []}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(res['port']['admin_state_up'], data['port']['admin_state_up']) self.assertEqual(res['port']['fixed_ips'], data['port']['fixed_ips']) def test_create_port_name_exceeds_40_chars(self): name = 'this_is_a_port_whose_name_is_longer_than_40_chars' with self.port(name=name) as port: # Assert the neutron name is not truncated self.assertEqual(name, port['port']['name']) def _verify_no_orphan_left(self, net_id): # Verify no port exists on net # ie: cleanup on db was successful query_params = "network_id=%s" % net_id self._test_list_resources('port', [], query_params=query_params) # Also verify no orphan port was left on nsx # no port should be there at all self.assertFalse(self.fc._fake_lswitch_lport_dict) def test_create_port_nsx_error_no_orphan_left(self): with mock.patch.object(nsxlib.switch, 'create_lport', side_effect=api_exc.NsxApiException): with self.network() as net: net_id = net['network']['id'] self._create_port(self.fmt, net_id, webob.exc.HTTPInternalServerError.code) self._verify_no_orphan_left(net_id) def test_create_port_neutron_error_no_orphan_left(self): with mock.patch.object(nsx_db, 'add_neutron_nsx_port_mapping', side_effect=ntn_exc.NeutronException): with self.network() as net: net_id = net['network']['id'] self._create_port(self.fmt, net_id, webob.exc.HTTPInternalServerError.code) self._verify_no_orphan_left(net_id) def test_create_port_db_error_no_orphan_left(self): db_exception = db_exc.DBError( inner_exception=sql_exc.IntegrityError(mock.ANY, mock.ANY, mock.ANY)) with mock.patch.object(nsx_db, 'add_neutron_nsx_port_mapping', side_effect=db_exception): with self.network() as net: with self.port(device_owner=constants.DEVICE_OWNER_DHCP): 
self._verify_no_orphan_left(net['network']['id']) def test_create_port_maintenance_returns_503(self): with self.network() as net: with mock.patch.object(nsxlib, 'do_request', side_effect=nsx_exc.MaintenanceInProgress): data = {'port': {'network_id': net['network']['id'], 'admin_state_up': False, 'fixed_ips': [], 'tenant_id': self._tenant_id}} plugin = directory.get_plugin() with mock.patch.object(plugin, 'get_network', return_value=net['network']): port_req = self.new_create_request('ports', data, self.fmt) res = port_req.get_response(self.api) self.assertEqual(webob.exc.HTTPServiceUnavailable.code, res.status_int) class TestNetworksV2(test_plugin.TestNetworksV2, NsxPluginV2TestCase): def test_create_network_vlan_transparent(self): self.skipTest("Currently no support in plugin for this") def _test_create_bridge_network(self, vlan_id=0): net_type = 'vlan' if vlan_id else 'flat' name = 'bridge_net' expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, net_type), (pnet.PHYSICAL_NETWORK, 'tzuuid'), (pnet.SEGMENTATION_ID, vlan_id)] providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: 'tzuuid'} if vlan_id: providernet_args[pnet.SEGMENTATION_ID] = vlan_id with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_bridge_network(self): self._test_create_bridge_network() def test_create_bridge_vlan_network(self): self._test_create_bridge_network(vlan_id=123) def test_create_bridge_vlan_network_outofrange_returns_400(self): with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_bridge_network(vlan_id=5000) self.assertEqual(ctx_manager.exception.code, 400) def test_create_l3_ext_network_fails_if_not_external(self): net_type = 'l3_ext' name = 'l3_ext_net' providernet_args = 
{pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: 'l3gwuuid', pnet.SEGMENTATION_ID: 123} with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)): pass self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPBadRequest.code) def test_list_networks_filter_by_id(self): # We add this unit test to cover some logic specific to the # nsx plugin with self.network(name='net1') as net1: with self.network(name='net2') as net2: query_params = 'id=%s' % net1['network']['id'] self._test_list_resources('network', [net1], query_params=query_params) query_params += '&id=%s' % net2['network']['id'] self._test_list_resources('network', [net1, net2], query_params=query_params) def test_delete_network_after_removing_subet(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' fmt = 'json' # Create new network res = self._create_network(fmt=fmt, name='net', admin_state_up=True) network = self.deserialize(fmt, res) subnet = self._make_subnet(fmt, network, gateway_ip, cidr, ip_version=4) req = self.new_delete_request('subnets', subnet['subnet']['id']) sub_del_res = req.get_response(self.api) self.assertEqual(sub_del_res.status_int, 204) req = self.new_delete_request('networks', network['network']['id']) net_del_res = req.get_response(self.api) self.assertEqual(net_del_res.status_int, 204) def test_list_networks_with_shared(self): with self.network(name='net1'): with self.network(name='net2', shared=True): req = self.new_list_request('networks') res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(len(res['networks']), 2) req_2 = self.new_list_request('networks') req_2.environ['neutron.context'] = context.Context('', 'somebody') res = self.deserialize('json', req_2.get_response(self.api)) # tenant must see a single network self.assertEqual(len(res['networks']), 1) def 
test_create_network_name_exceeds_40_chars(self): name = 'this_is_a_network_whose_name_is_longer_than_40_chars' with self.network(name=name) as net: # Assert neutron name is not truncated self.assertEqual(net['network']['name'], name) def test_create_network_maintenance_returns_503(self): data = {'network': {'name': 'foo', 'admin_state_up': True, 'tenant_id': self._tenant_id}} with mock.patch.object(nsxlib, 'do_request', side_effect=nsx_exc.MaintenanceInProgress): net_req = self.new_create_request('networks', data, self.fmt) res = net_req.get_response(self.api) self.assertEqual(webob.exc.HTTPServiceUnavailable.code, res.status_int) def test_update_network_with_admin_false(self): data = {'network': {'admin_state_up': False}} with self.network() as net: plugin = directory.get_plugin() self.assertRaises(NotImplementedError, plugin.update_network, context.get_admin_context(), net['network']['id'], data) def test_update_network_with_name_calls_nsx(self): with mock.patch.object( nsxlib.switch, 'update_lswitch') as update_lswitch_mock: # don't worry about deleting this network, do not use # context manager ctx = context.get_admin_context() # Because of commit 79c9712 a tenant must be specified otherwise # the unit test will fail ctx.tenant_id = 'whatever' plugin = directory.get_plugin() net = plugin.create_network( ctx, {'network': {'name': 'xxx', 'admin_state_up': True, 'shared': False, 'tenant_id': ctx.tenant_id, 'port_security_enabled': True}}) plugin.update_network(ctx, net['id'], {'network': {'name': 'yyy'}}) update_lswitch_mock.assert_called_once_with( mock.ANY, mock.ANY, 'yyy') class SecurityGroupsTestCase(ext_sg.SecurityGroupDBTestCase): def setUp(self): test_utils.override_nsx_ini_test() # mock nsx api client self.fc = fake.FakeClient(vmware.STUBS_PATH) self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True) instance = self.mock_nsx.start() instance.return_value.login.return_value = "the_cookie" # Avoid runs of the synchronizer looping call patch_sync = 
mock.patch.object(sync, '_start_loopingcall') patch_sync.start() instance.return_value.request.side_effect = self.fc.fake_request super(SecurityGroupsTestCase, self).setUp(vmware.PLUGIN_NAME) self.plugin = directory.get_plugin() class TestSecurityGroup(ext_sg.TestSecurityGroups, SecurityGroupsTestCase): def test_create_security_group_name_exceeds_40_chars(self): name = 'this_is_a_secgroup_whose_name_is_longer_than_40_chars' with self.security_group(name=name) as sg: # Assert Neutron name is not truncated self.assertEqual(sg['security_group']['name'], name) def test_create_security_group_rule_bad_input(self): name = 'foo security group' description = 'foo description' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] protocol = 200 min_range = 32 max_range = 4343 rule = self._build_security_group_rule( security_group_id, 'ingress', protocol, min_range, max_range) res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(res.status_int, 400) def test_skip_duplicate_default_sg_error(self): num_called = [0] original_func = self.plugin.create_security_group def side_effect(context, security_group, default_sg): # can't always raise, or create_security_group will hang self.assertTrue(default_sg) self.assertTrue(num_called[0] < 2) num_called[0] += 1 ret = original_func(context, security_group, default_sg) if num_called[0] == 1: return ret # make another call to cause an exception. # NOTE(yamamoto): raising the exception by ourselves # doesn't update the session state appropriately. 
self.assertRaises(db_exc.DBDuplicateEntry(), original_func, context, security_group, default_sg) with mock.patch.object(self.plugin, 'create_security_group', side_effect=side_effect): self.plugin.create_network( context.get_admin_context(), {'network': {'name': 'foo', 'admin_state_up': True, 'shared': False, 'tenant_id': 'bar', 'port_security_enabled': True}}) def test_create_security_group_rule_icmpv6_legacy_protocol_name(self): self.skipTest('not supported') class TestL3ExtensionManager(object): def get_resources(self): # Simulate extension of L3 attribute map l3.L3().update_attributes_map( l3_egm_apidef.RESOURCE_ATTRIBUTE_MAP) l3.L3().update_attributes_map( dvr_apidef.RESOURCE_ATTRIBUTE_MAP) return l3.L3.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class TestL3SecGrpExtensionManager(TestL3ExtensionManager): """A fake extension manager for L3 and Security Group extensions. Includes also NSX specific L3 attributes. """ def get_resources(self): resources = super(TestL3SecGrpExtensionManager, self).get_resources() resources.extend(secgrp.Securitygroup.get_resources()) return resources class L3NatTest(test_l3_plugin.L3BaseForIntTests, NsxPluginV2TestCase): def setUp(self, plugin=vmware.PLUGIN_NAME, ext_mgr=None, service_plugins=None): cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) ext_mgr = ext_mgr or TestL3ExtensionManager() super(L3NatTest, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) plugin_instance = directory.get_plugin() self._plugin_name = "%s.%s" % ( plugin_instance.__module__, plugin_instance.__class__.__name__) self._plugin_class = plugin_instance.__class__ def _create_l3_ext_network(self, vlan_id=None): name = 'l3_ext_net' net_type = utils.NetworkTypes.L3_EXT providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: 'l3_gw_uuid'} if vlan_id: providernet_args[pnet.SEGMENTATION_ID] = vlan_id return self.network(name=name, router__external=True, 
providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)) class TestL3NatTestCase(L3NatTest, test_l3_plugin.L3NatDBIntTestCase, NsxPluginV2TestCase, test_metadata.MetaDataTestCase): def _test_create_l3_ext_network(self, vlan_id=0): name = 'l3_ext_net' net_type = utils.NetworkTypes.L3_EXT expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (extnet_apidef.EXTERNAL, True), (pnet.NETWORK_TYPE, net_type), (pnet.PHYSICAL_NETWORK, 'l3_gw_uuid'), (pnet.SEGMENTATION_ID, vlan_id)] with self._create_l3_ext_network(vlan_id) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def _nsx_validate_ext_gw(self, router_id, l3_gw_uuid, vlan_id): """Verify data on fake NSX API client in order to validate plugin did set them properly """ # First find the NSX router ID ctx = context.get_admin_context() nsx_router_id = nsx_db.get_nsx_router_id(ctx.session, router_id) ports = [port for port in self.fc._fake_lrouter_lport_dict.values() if (port['lr_uuid'] == nsx_router_id and port['att_type'] == "L3GatewayAttachment")] self.assertEqual(len(ports), 1) self.assertEqual(ports[0]['attachment_gwsvc_uuid'], l3_gw_uuid) self.assertEqual(ports[0].get('vlan_id'), vlan_id) def test_create_l3_ext_network_without_vlan(self): self._test_create_l3_ext_network() def _test_router_create_with_gwinfo_and_l3_ext_net(self, vlan_id=None, validate_ext_gw=True): with self._create_l3_ext_network(vlan_id) as net: with self.subnet(network=net) as s: data = {'router': {'tenant_id': 'whatever'}} data['router']['name'] = 'router1' data['router']['external_gateway_info'] = { 'network_id': s['subnet']['network_id']} router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) self.assertEqual( s['subnet']['network_id'], (router['router']['external_gateway_info'] ['network_id'])) if validate_ext_gw: 
self._nsx_validate_ext_gw(router['router']['id'], 'l3_gw_uuid', vlan_id) def test_router_create_with_gwinfo_and_l3_ext_net(self): self._test_router_create_with_gwinfo_and_l3_ext_net() def test_router_create_with_gwinfo_and_l3_ext_net_with_vlan(self): self._test_router_create_with_gwinfo_and_l3_ext_net(444) def _test_router_create_with_distributed(self, dist_input, dist_expected, version='3.1', return_code=201): self.mock_instance.return_value.get_version.return_value = ( ver_module.Version(version)) data = {'tenant_id': 'whatever'} data['name'] = 'router1' data['distributed'] = dist_input router_req = self.new_create_request( 'routers', {'router': data}, self.fmt) res = router_req.get_response(self.ext_api) self.assertEqual(return_code, res.status_int) if res.status_int == 201: router = self.deserialize(self.fmt, res) self.assertIn('distributed', router['router']) self.assertEqual(dist_expected, router['router']['distributed']) def test_router_create_distributed_with_3_1(self): self._test_router_create_with_distributed(True, True) def test_router_create_distributed_with_new_nsx_versions(self): with mock.patch.object(nsxlib.router, 'create_explicit_route_lrouter'): self._test_router_create_with_distributed(True, True, '3.2') self._test_router_create_with_distributed(True, True, '4.0') self._test_router_create_with_distributed(True, True, '4.1') def test_router_create_not_distributed(self): self._test_router_create_with_distributed(False, False) def test_router_create_distributed_unspecified(self): self._test_router_create_with_distributed(None, False) def test_router_create_distributed_returns_400(self): self._test_router_create_with_distributed(True, None, '3.0', 400) def test_router_create_on_obsolete_platform(self): def obsolete_response(*args, **kwargs): response = (nsxlib.router. 
_create_implicit_routing_lrouter(*args, **kwargs)) response.pop('distributed') return response with mock.patch.object( nsxlib.router, 'create_lrouter', new=obsolete_response): self._test_router_create_with_distributed(None, False, '2.2') def _create_router_with_gw_info_for_test(self, subnet): data = {'router': {'tenant_id': 'whatever', 'name': 'router1', 'external_gateway_info': {'network_id': subnet['subnet']['network_id']}}} router_req = self.new_create_request( 'routers', data, self.fmt) return router_req.get_response(self.ext_api) def test_router_create_nsx_error_returns_500(self, vlan_id=None): with mock.patch.object(nsxlib.router, 'create_router_lport', side_effect=api_exc.NsxApiException): with self._create_l3_ext_network(vlan_id) as net: with self.subnet(network=net) as s: res = self._create_router_with_gw_info_for_test(s) self.assertEqual( webob.exc.HTTPInternalServerError.code, res.status_int) def test_router_add_gateway_invalid_network_returns_404(self): # NOTE(salv-orlando): This unit test has been overridden # as the nsx plugin support the ext_gw_mode extension # which mandates an uuid for the external network identifier with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], uuidutils.generate_uuid(), expected_code=webob.exc.HTTPNotFound.code) def _verify_router_rollback(self): # Check that nothing is left on DB # TODO(salv-orlando): Verify whehter this is thread-safe # w.r.t. 
sqllite and parallel testing self._test_list_resources('router', []) # Check that router is not in NSX self.assertFalse(self.fc._fake_lrouter_dict) # TODO(asarfaty): make this test pass with the new enginefacade def skip_test_router_create_with_gw_info_neutron_fail_does_rollback(self): # Simulate get subnet error while building list of ips with prefix with mock.patch.object(self._plugin_class, '_build_ip_address_list', side_effect=ntn_exc.SubnetNotFound( subnet_id='xxx')): with self._create_l3_ext_network() as net: with self.subnet(network=net) as s: res = self._create_router_with_gw_info_for_test(s) self.assertEqual( webob.exc.HTTPNotFound.code, res.status_int) self._verify_router_rollback() def test_router_create_with_gw_info_nsx_fail_does_rollback(self): # Simulate error while fetching nsx router gw port with mock.patch.object(self._plugin_class, '_find_router_gw_port', side_effect=api_exc.NsxApiException): with self._create_l3_ext_network() as net: with self.subnet(network=net) as s: res = self._create_router_with_gw_info_for_test(s) self.assertEqual( webob.exc.HTTPInternalServerError.code, res.status_int) self._verify_router_rollback() def _test_router_update_gateway_on_l3_ext_net(self, vlan_id=None, validate_ext_gw=True): with self.router() as r: with self.subnet() as s1: with self._create_l3_ext_network(vlan_id) as net: with self.subnet(network=net) as s2: self._set_net_external(s1['subnet']['network_id']) try: self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s1['subnet']['network_id']) # Plug network with external mapping self._set_net_external(s2['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) 
self.assertEqual(net_id, s2['subnet']['network_id']) if validate_ext_gw: self._nsx_validate_ext_gw( body['router']['id'], 'l3_gw_uuid', vlan_id) finally: # Cleanup self._remove_external_gateway_from_router( r['router']['id'], s2['subnet']['network_id']) def test_router_update_gateway_on_l3_ext_net(self): self._test_router_update_gateway_on_l3_ext_net() def test_router_update_gateway_on_l3_ext_net_with_vlan(self): self._test_router_update_gateway_on_l3_ext_net(444) def test_router_list_by_tenant_id(self): with self.router(), self.router(): with self.router(tenant_id='custom') as router1: self._test_list_resources('router', [router1], query_params="tenant_id=custom") def test_create_l3_ext_network_with_vlan(self): self._test_create_l3_ext_network(666) def test_floatingip_with_assoc_fails(self): self._test_floatingip_with_assoc_fails( "%s.%s" % (self._plugin_name, "_update_fip_assoc")) def test_floatingip_with_invalid_create_port(self): self._test_floatingip_with_invalid_create_port(self._plugin_name) def test_create_router_name_exceeds_40_chars(self): name = 'this_is_a_router_whose_name_is_longer_than_40_chars' with self.router(name=name) as rtr: # Assert Neutron name is not truncated self.assertEqual(rtr['router']['name'], name) def test_router_add_interface_port(self): orig_update_port = self.plugin.update_port with self.router() as r, ( self.port()) as p, ( mock.patch.object(self.plugin, 'update_port')) as update_port: update_port.side_effect = orig_update_port body = self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self.assertIn('port_id', body) self.assertEqual(p['port']['id'], body['port_id']) expected_port_update = {'port_security_enabled': False, 'security_groups': []} update_port.assert_any_call( mock.ANY, p['port']['id'], {'port': expected_port_update}) # fetch port and confirm device_id body = self._show('ports', p['port']['id']) self.assertEqual(r['router']['id'], body['port']['device_id']) # clean-up 
self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def _test_floatingip_update(self, expected_status): super(TestL3NatTestCase, self).test_floatingip_update( expected_status) def test_floatingip_update(self): self._test_floatingip_update(constants.FLOATINGIP_STATUS_DOWN) def test_floatingip_disassociate(self): with self.port() as p: private_sub = {'subnet': {'id': p['port']['fixed_ips'][0]['subnet_id']}} plugin = directory.get_plugin() with mock.patch.object(plugin, 'notify_routers_updated') as notify: with self.floatingip_no_assoc(private_sub) as fip: port_id = p['port']['id'] body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': port_id}}) self.assertEqual(body['floatingip']['port_id'], port_id) # Floating IP status should be active self.assertEqual(constants.FLOATINGIP_STATUS_ACTIVE, body['floatingip']['status']) # Disassociate body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': None}}) body = self._show('floatingips', fip['floatingip']['id']) self.assertIsNone(body['floatingip']['port_id']) self.assertIsNone(body['floatingip']['fixed_ip_address']) # Floating IP status should be down self.assertEqual(constants.FLOATINGIP_STATUS_DOWN, body['floatingip']['status']) # check that notification was not requested self.assertFalse(notify.called) def test_create_router_maintenance_returns_503(self): with self._create_l3_ext_network() as net: with self.subnet(network=net) as s: with mock.patch.object( nsxlib, 'do_request', side_effect=nsx_exc.MaintenanceInProgress): data = {'router': {'tenant_id': 'whatever'}} data['router']['name'] = 'router1' data['router']['external_gateway_info'] = { 'network_id': s['subnet']['network_id']} router_req = self.new_create_request( 'routers', data, self.fmt) res = router_req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPServiceUnavailable.code, res.status_int) def test_router_add_interface_port_removes_security_group(self): with 
self.router() as r: with self.port(do_delete=False) as p: body = self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self.assertIn('port_id', body) self.assertEqual(body['port_id'], p['port']['id']) # fetch port and confirm no security-group on it. body = self._show('ports', p['port']['id']) self.assertEqual(body['port']['security_groups'], []) self.assertFalse(body['port']['port_security_enabled']) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_update_subnet_gateway_for_external_net(self): plugin = directory.get_plugin() port_mock = {'uuid': uuidutils.generate_uuid()} with mock.patch.object(plugin, '_find_router_gw_port', return_value=port_mock): super(TestL3NatTestCase, self).test_update_subnet_gateway_for_external_net() def test_floatingip_update_to_same_port_id_twice(self): self.skipTest('Plugin changes floating port status') def test_floating_port_status_not_applicable(self): self.skipTest('Plugin changes floating port status') def test_create_router_gateway_fails(self): self.skipTest('not supported') def test_first_floatingip_associate_notification(self): self.skipTest('not supported') def test_floatingip_disassociate_notification(self): self.skipTest('not supported') def test_metadata_network_with_update_subnet_dhcp_enable(self): self.skipTest('not supported') def test_metadata_network_with_update_subnet_dhcp_disable(self): self.skipTest('not supported') def test_floatingip_via_router_interface_returns_404(self): self.skipTest('not supported') def test_floatingip_via_router_interface_returns_201(self): self.skipTest('not supported') def test_floatingip_update_subnet_gateway_disabled(self): self.skipTest('not supported') class ExtGwModeTestCase(NsxPluginV2TestCase, test_ext_gw_mode.ExtGwModeIntTestCase): def test_router_gateway_set_fail_after_port_create(self): self.skipTest("TBD") class NeutronNsxOutOfSync(NsxPluginV2TestCase, test_l3_plugin.L3NatTestCaseMixin, 
ext_sg.SecurityGroupsTestCase): def setUp(self): super(NeutronNsxOutOfSync, self).setUp( ext_mgr=TestL3SecGrpExtensionManager()) def test_delete_network_not_in_nsx(self): res = self._create_network('json', 'net1', True) net1 = self.deserialize('json', res) self.fc._fake_lswitch_dict.clear() req = self.new_delete_request('networks', net1['network']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 204) def test_show_network_not_in_nsx(self): res = self._create_network('json', 'net1', True) net = self.deserialize('json', res) self.fc._fake_lswitch_dict.clear() req = self.new_show_request('networks', net['network']['id'], fields=['id', 'status']) net = self.deserialize('json', req.get_response(self.api)) self.assertEqual(net['network']['status'], constants.NET_STATUS_ERROR) def test_delete_port_not_in_nsx(self): res = self._create_network('json', 'net1', True) net1 = self.deserialize('json', res) res = self._create_port('json', net1['network']['id']) port = self.deserialize('json', res) self.fc._fake_lswitch_lport_dict.clear() req = self.new_delete_request('ports', port['port']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 204) def test_show_port_not_in_nsx(self): res = self._create_network('json', 'net1', True) net1 = self.deserialize('json', res) res = self._create_port('json', net1['network']['id']) port = self.deserialize('json', res) self.fc._fake_lswitch_lport_dict.clear() self.fc._fake_lswitch_lportstatus_dict.clear() req = self.new_show_request('ports', port['port']['id'], fields=['id', 'status']) net = self.deserialize('json', req.get_response(self.api)) self.assertEqual(net['port']['status'], constants.PORT_STATUS_ERROR) def test_create_port_on_network_not_in_nsx(self): res = self._create_network('json', 'net1', True) net1 = self.deserialize('json', res) self.fc._fake_lswitch_dict.clear() res = self._create_port('json', net1['network']['id']) port = self.deserialize('json', res) 
self.assertEqual(port['port']['status'], constants.PORT_STATUS_ERROR) def test_update_port_not_in_nsx(self): res = self._create_network('json', 'net1', True) net1 = self.deserialize('json', res) res = self._create_port('json', net1['network']['id']) port = self.deserialize('json', res) self.fc._fake_lswitch_lport_dict.clear() data = {'port': {'name': 'error_port'}} req = self.new_update_request('ports', data, port['port']['id']) port = self.deserialize('json', req.get_response(self.api)) self.assertEqual(port['port']['status'], constants.PORT_STATUS_ERROR) self.assertEqual(port['port']['name'], 'error_port') def test_delete_port_and_network_not_in_nsx(self): res = self._create_network('json', 'net1', True) net1 = self.deserialize('json', res) res = self._create_port('json', net1['network']['id']) port = self.deserialize('json', res) self.fc._fake_lswitch_dict.clear() self.fc._fake_lswitch_lport_dict.clear() req = self.new_delete_request('ports', port['port']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 204) req = self.new_delete_request('networks', net1['network']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 204) def test_delete_router_not_in_nsx(self): res = self._create_router('json', 'tenant') router = self.deserialize('json', res) self.fc._fake_lrouter_dict.clear() req = self.new_delete_request('routers', router['router']['id']) res = req.get_response(self.ext_api) self.assertEqual(res.status_int, 204) def test_show_router_not_in_nsx(self): res = self._create_router('json', 'tenant') router = self.deserialize('json', res) self.fc._fake_lrouter_dict.clear() req = self.new_show_request('routers', router['router']['id'], fields=['id', 'status']) router = self.deserialize('json', req.get_response(self.ext_api)) self.assertEqual(router['router']['status'], constants.NET_STATUS_ERROR) def _create_network_and_subnet(self, cidr, external=False): net_res = self._create_network('json', 'ext_net', True) net = 
self.deserialize('json', net_res) net_id = net['network']['id'] if external: self._update('networks', net_id, {'network': {extnet_apidef.EXTERNAL: True}}) sub_res = self._create_subnet('json', net_id, cidr) sub = self.deserialize('json', sub_res) return net_id, sub['subnet']['id'] def test_clear_gateway_nat_rule_not_in_nsx(self): # Create external network and subnet ext_net_id = self._create_network_and_subnet('1.1.1.0/24', True)[0] # Create internal network and subnet int_sub_id = self._create_network_and_subnet('10.0.0.0/24')[1] res = self._create_router('json', 'tenant') router = self.deserialize('json', res) # Add interface to router (needed to generate NAT rule) req = self.new_action_request( 'routers', {'subnet_id': int_sub_id}, router['router']['id'], "add_router_interface") res = req.get_response(self.ext_api) self.assertEqual(res.status_int, 200) # Set gateway for router req = self.new_update_request( 'routers', {'router': {'external_gateway_info': {'network_id': ext_net_id}}}, router['router']['id']) res = req.get_response(self.ext_api) self.assertEqual(res.status_int, 200) # Delete NAT rule from NSX, clear gateway # and verify operation still succeeds self.fc._fake_lrouter_nat_dict.clear() req = self.new_update_request( 'routers', {'router': {'external_gateway_info': {}}}, router['router']['id']) res = req.get_response(self.ext_api) self.assertEqual(res.status_int, 200) def _test_remove_router_interface_nsx_out_of_sync(self, unsync_action): # Create external network and subnet ext_net_id = self._create_network_and_subnet('1.1.1.0/24', True)[0] # Create internal network and subnet int_sub_id = self._create_network_and_subnet('10.0.0.0/24')[1] res = self._create_router('json', 'tenant') router = self.deserialize('json', res) # Set gateway and add interface to router (needed to generate NAT rule) req = self.new_update_request( 'routers', {'router': {'external_gateway_info': {'network_id': ext_net_id}}}, router['router']['id']) res = 
req.get_response(self.ext_api) self.assertEqual(res.status_int, 200) req = self.new_action_request( 'routers', {'subnet_id': int_sub_id}, router['router']['id'], "add_router_interface") res = req.get_response(self.ext_api) self.assertEqual(res.status_int, 200) unsync_action() req = self.new_action_request( 'routers', {'subnet_id': int_sub_id}, router['router']['id'], "remove_router_interface") res = req.get_response(self.ext_api) self.assertEqual(res.status_int, 200) def test_remove_router_interface_not_in_nsx(self): def unsync_action(): self.fc._fake_lrouter_dict.clear() self.fc._fake_lrouter_nat_dict.clear() self._test_remove_router_interface_nsx_out_of_sync(unsync_action) def test_remove_router_interface_nat_rule_not_in_nsx(self): self._test_remove_router_interface_nsx_out_of_sync( self.fc._fake_lrouter_nat_dict.clear) def test_remove_router_interface_duplicate_nat_rules_in_nsx(self): def unsync_action(): # duplicate every entry in the nat rule dict tmp = copy.deepcopy(self.fc._fake_lrouter_nat_dict) for (_rule_id, rule) in tmp.items(): self.fc._fake_lrouter_nat_dict[uuid.uuid4()] = rule self._test_remove_router_interface_nsx_out_of_sync(unsync_action) def test_update_router_not_in_nsx(self): res = self._create_router('json', 'tenant') router = self.deserialize('json', res) self.fc._fake_lrouter_dict.clear() req = self.new_update_request( 'routers', {'router': {'name': 'goo'}}, router['router']['id']) res = req.get_response(self.ext_api) self.assertEqual(res.status_int, 500) req = self.new_show_request('routers', router['router']['id']) router = self.deserialize('json', req.get_response(self.ext_api)) self.assertEqual(router['router']['status'], constants.NET_STATUS_ERROR) def test_delete_security_group_not_in_nsx(self): res = self._create_security_group('json', 'name', 'desc') sec_group = self.deserialize('json', res) self.fc._fake_securityprofile_dict.clear() req = self.new_delete_request( 'security-groups', sec_group['security_group']['id']) res = 
req.get_response(self.ext_api) self.assertEqual(res.status_int, 204) class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt, NsxPluginV2TestCase): def setUp(self, plugin=None): super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp( plugin=vmware.PLUGIN_NAME) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/test_sync.py0000666000175100017510000010070613244523345024134 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import contextlib import sys import time import mock from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import l3 as l3_exc from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from neutron.tests import base from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit import testlib_api from vmware_nsx.api_client import client from vmware_nsx.api_client import exception as api_exc from vmware_nsx.api_client import version from vmware_nsx.common import sync from vmware_nsx.db import db from vmware_nsx import nsx_cluster as cluster from vmware_nsx.nsxlib import mh as nsxlib from vmware_nsx import plugin from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_mh.apiclient import fake LOG = log.getLogger(__name__) _uuid = test_base._uuid LSWITCHES = [{'uuid': _uuid(), 'name': 'ls-1'}, {'uuid': _uuid(), 'name': 'ls-2'}] LSWITCHPORTS = 
[{'uuid': _uuid(), 'name': 'lp-1'}, {'uuid': _uuid(), 'name': 'lp-2'}] LROUTERS = [{'uuid': _uuid(), 'name': 'lr-1'}, {'uuid': _uuid(), 'name': 'lr-2'}] class CacheTestCase(base.BaseTestCase): """Test suite providing coverage for the Cache class.""" def setUp(self): self.nsx_cache = sync.NsxCache() for lswitch in LSWITCHES: self.nsx_cache._uuid_dict_mappings[lswitch['uuid']] = ( self.nsx_cache._lswitches) self.nsx_cache._lswitches[lswitch['uuid']] = ( {'data': lswitch, 'hash': hash(jsonutils.dumps(lswitch))}) for lswitchport in LSWITCHPORTS: self.nsx_cache._uuid_dict_mappings[lswitchport['uuid']] = ( self.nsx_cache._lswitchports) self.nsx_cache._lswitchports[lswitchport['uuid']] = ( {'data': lswitchport, 'hash': hash(jsonutils.dumps(lswitchport))}) for lrouter in LROUTERS: self.nsx_cache._uuid_dict_mappings[lrouter['uuid']] = ( self.nsx_cache._lrouters) self.nsx_cache._lrouters[lrouter['uuid']] = ( {'data': lrouter, 'hash': hash(jsonutils.dumps(lrouter))}) super(CacheTestCase, self).setUp() def test_get_lswitches(self): ls_uuids = self.nsx_cache.get_lswitches() self.assertEqual(set(ls_uuids), set([ls['uuid'] for ls in LSWITCHES])) def test_get_lswitchports(self): lp_uuids = self.nsx_cache.get_lswitchports() self.assertEqual(set(lp_uuids), set([lp['uuid'] for lp in LSWITCHPORTS])) def test_get_lrouters(self): lr_uuids = self.nsx_cache.get_lrouters() self.assertEqual(set(lr_uuids), set([lr['uuid'] for lr in LROUTERS])) def test_get_lswitches_changed_only(self): ls_uuids = self.nsx_cache.get_lswitches(changed_only=True) self.assertEqual(0, len(ls_uuids)) def test_get_lswitchports_changed_only(self): lp_uuids = self.nsx_cache.get_lswitchports(changed_only=True) self.assertEqual(0, len(lp_uuids)) def test_get_lrouters_changed_only(self): lr_uuids = self.nsx_cache.get_lrouters(changed_only=True) self.assertEqual(0, len(lr_uuids)) def _verify_update(self, new_resource, changed=True, hit=True): cached_resource = self.nsx_cache[new_resource['uuid']] 
self.assertEqual(new_resource, cached_resource['data']) self.assertEqual(hit, cached_resource.get('hit', False)) self.assertEqual(changed, cached_resource.get('changed', False)) def test_update_lswitch_new_item(self): new_switch_uuid = _uuid() new_switch = {'uuid': new_switch_uuid, 'name': 'new_switch'} self.nsx_cache.update_lswitch(new_switch) self.assertIn(new_switch_uuid, self.nsx_cache._lswitches.keys()) self._verify_update(new_switch) def test_update_lswitch_existing_item(self): switch = LSWITCHES[0] switch['name'] = 'new_name' self.nsx_cache.update_lswitch(switch) self.assertIn(switch['uuid'], self.nsx_cache._lswitches.keys()) self._verify_update(switch) def test_update_lswitchport_new_item(self): new_switchport_uuid = _uuid() new_switchport = {'uuid': new_switchport_uuid, 'name': 'new_switchport'} self.nsx_cache.update_lswitchport(new_switchport) self.assertIn(new_switchport_uuid, self.nsx_cache._lswitchports.keys()) self._verify_update(new_switchport) def test_update_lswitchport_existing_item(self): switchport = LSWITCHPORTS[0] switchport['name'] = 'new_name' self.nsx_cache.update_lswitchport(switchport) self.assertIn(switchport['uuid'], self.nsx_cache._lswitchports.keys()) self._verify_update(switchport) def test_update_lrouter_new_item(self): new_router_uuid = _uuid() new_router = {'uuid': new_router_uuid, 'name': 'new_router'} self.nsx_cache.update_lrouter(new_router) self.assertIn(new_router_uuid, self.nsx_cache._lrouters.keys()) self._verify_update(new_router) def test_update_lrouter_existing_item(self): router = LROUTERS[0] router['name'] = 'new_name' self.nsx_cache.update_lrouter(router) self.assertIn(router['uuid'], self.nsx_cache._lrouters.keys()) self._verify_update(router) def test_process_updates_initial(self): # Clear cache content to simulate first-time filling self.nsx_cache._lswitches.clear() self.nsx_cache._lswitchports.clear() self.nsx_cache._lrouters.clear() self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS) for resource in 
LSWITCHES + LROUTERS + LSWITCHPORTS: self._verify_update(resource) def test_process_updates_no_change(self): self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS) for resource in LSWITCHES + LROUTERS + LSWITCHPORTS: self._verify_update(resource, changed=False) def test_process_updates_with_changes(self): LSWITCHES[0]['name'] = 'altered' self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS) for resource in LSWITCHES + LROUTERS + LSWITCHPORTS: changed = (True if resource['uuid'] == LSWITCHES[0]['uuid'] else False) self._verify_update(resource, changed=changed) def _test_process_updates_with_removals(self): lswitches = LSWITCHES[:] lswitch = lswitches.pop() self.nsx_cache.process_updates(lswitches, LROUTERS, LSWITCHPORTS) for resource in LSWITCHES + LROUTERS + LSWITCHPORTS: hit = (False if resource['uuid'] == lswitch['uuid'] else True) self._verify_update(resource, changed=False, hit=hit) return (lswitch, lswitches) def test_process_updates_with_removals(self): self._test_process_updates_with_removals() def test_process_updates_cleanup_after_delete(self): deleted_lswitch, lswitches = self._test_process_updates_with_removals() self.nsx_cache.process_deletes() self.nsx_cache.process_updates(lswitches, LROUTERS, LSWITCHPORTS) self.assertNotIn(deleted_lswitch['uuid'], self.nsx_cache._lswitches) def test_update_resource_does_not_cleanup_deleted_resources(self): deleted_lswitch, lswitches = self._test_process_updates_with_removals() self.nsx_cache.process_deletes() self.nsx_cache.update_lswitch(deleted_lswitch) self.assertIn(deleted_lswitch['uuid'], self.nsx_cache._lswitches) def _verify_delete(self, resource, deleted=True, hit=True): cached_resource = self.nsx_cache[resource['uuid']] data_field = 'data_bk' if deleted else 'data' self.assertEqual(resource, cached_resource[data_field]) self.assertEqual(hit, cached_resource.get('hit', False)) self.assertEqual(deleted, cached_resource.get('changed', False)) def _set_hit(self, resources, 
uuid_to_delete=None): for resource in resources: if resource['data']['uuid'] != uuid_to_delete: resource['hit'] = True def test_process_deletes_no_change(self): # Mark all resources as hit self._set_hit(self.nsx_cache._lswitches.values()) self._set_hit(self.nsx_cache._lswitchports.values()) self._set_hit(self.nsx_cache._lrouters.values()) self.nsx_cache.process_deletes() for resource in LSWITCHES + LROUTERS + LSWITCHPORTS: self._verify_delete(resource, hit=False, deleted=False) def test_process_deletes_with_removals(self): # Mark all resources but one as hit uuid_to_delete = LSWITCHPORTS[0]['uuid'] self._set_hit(self.nsx_cache._lswitches.values(), uuid_to_delete) self._set_hit(self.nsx_cache._lswitchports.values(), uuid_to_delete) self._set_hit(self.nsx_cache._lrouters.values(), uuid_to_delete) self.nsx_cache.process_deletes() for resource in LSWITCHES + LROUTERS + LSWITCHPORTS: deleted = resource['uuid'] == uuid_to_delete self._verify_delete(resource, hit=False, deleted=deleted) class SyncLoopingCallTestCase(base.BaseTestCase): def test_looping_calls(self): # Avoid runs of the synchronization process - just start # the looping call with mock.patch.object( sync.NsxSynchronizer, '_synchronize_state', return_value=0.01,): synchronizer = sync.NsxSynchronizer(mock.ANY, mock.ANY, 100, 0, 0, initial_delay=0) time.sleep(0.03) # stop looping call before asserting synchronizer._sync_looping_call.stop() # Just verify the looping call has been called, trying # to assess the exact number of calls would be unreliable self.assertTrue(synchronizer._synchronize_state.call_count) class SyncTestCase(testlib_api.SqlTestCase): def setUp(self): # mock api client self.fc = fake.FakeClient(vmware.STUBS_PATH) mock_api = mock.patch(vmware.NSXAPI_NAME, autospec=True) # Avoid runs of the synchronizer looping call # These unit tests will excplicitly invoke synchronization patch_sync = mock.patch.object(sync, '_start_loopingcall') self.mock_api = mock_api.start() patch_sync.start() 
self.mock_api.return_value.login.return_value = "the_cookie" # Emulate tests against NSX 3.x self.mock_api.return_value.get_version.return_value = ( version.Version("3.1")) self.mock_api.return_value.request.side_effect = self.fc.fake_request self.fake_cluster = cluster.NSXCluster( name='fake-cluster', nsx_controllers=['1.1.1.1:999'], default_tz_uuid=_uuid(), nsx_user='foo', nsx_password='bar') self.fake_cluster.api_client = client.NsxApiClient( ('1.1.1.1', '999', True), self.fake_cluster.nsx_user, self.fake_cluster.nsx_password, http_timeout=self.fake_cluster.http_timeout, retries=self.fake_cluster.retries, redirects=self.fake_cluster.redirects) # Instantiate Neutron plugin # and setup needed config variables args = ['--config-file', vmware.get_fake_conf('neutron.conf.test'), '--config-file', vmware.get_fake_conf('nsx.ini.test')] self.config_parse(args=args) cfg.CONF.set_override('allow_overlapping_ips', True) with mock.patch("neutron.common.rpc.create_connection"): self._plugin = plugin.NsxPlugin() mock_nm_get_plugin = mock.patch( "neutron_lib.plugins.directory.get_plugin") self.mock_nm_get_plugin = mock_nm_get_plugin.start() self.mock_nm_get_plugin.return_value = self._plugin super(SyncTestCase, self).setUp() self.addCleanup(self.fc.reset_all) @contextlib.contextmanager def _populate_data(self, ctx, net_size=2, port_size=2, router_size=2): def network(idx): return {'network': {'name': 'net-%s' % idx, 'admin_state_up': True, 'shared': False, 'port_security_enabled': True, 'tenant_id': 'foo'}} def subnet(idx, net_id): return {'subnet': {'cidr': '10.10.%s.0/24' % idx, 'name': 'sub-%s' % idx, 'gateway_ip': constants.ATTR_NOT_SPECIFIED, 'allocation_pools': constants.ATTR_NOT_SPECIFIED, 'ip_version': 4, 'dns_nameservers': constants.ATTR_NOT_SPECIFIED, 'host_routes': constants.ATTR_NOT_SPECIFIED, 'enable_dhcp': True, 'network_id': net_id, 'tenant_id': 'foo'}} def port(idx, net_id): return {'port': {'network_id': net_id, 'name': 'port-%s' % idx, 'admin_state_up': True, 
'device_id': 'miao', 'device_owner': 'bau', 'fixed_ips': constants.ATTR_NOT_SPECIFIED, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'tenant_id': 'foo'}} def router(idx): # Use random uuids as names return {'router': {'name': 'rtr-%s' % idx, 'admin_state_up': True, 'tenant_id': 'foo'}} networks = [] ports = [] routers = [] for i in range(net_size): net = self._plugin.create_network(ctx, network(i)) networks.append(net) self._plugin.create_subnet(ctx, subnet(i, net['id'])) for j in range(port_size): ports.append(self._plugin.create_port( ctx, port("%s-%s" % (i, j), net['id']))) for i in range(router_size): routers.append(self._plugin.create_router(ctx, router(i))) # Do not return anything as the user does need the actual # data created yield # Remove everything for router in routers: self._plugin.delete_router(ctx, router['id']) for port in ports: self._plugin.delete_port(ctx, port['id']) # This will remove networks and subnets for network in networks: self._plugin.delete_network(ctx, network['id']) def _get_tag_dict(self, tags): return dict((tag['scope'], tag['tag']) for tag in tags) def _test_sync(self, exp_net_status, exp_port_status, exp_router_status, action_callback=None, sp=None): ls_uuid = list(self.fc._fake_lswitch_dict)[0] neutron_net_id = self._get_tag_dict( self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id'] lp_uuid = list(self.fc._fake_lswitch_lport_dict)[0] neutron_port_id = self._get_tag_dict( self.fc._fake_lswitch_lport_dict[lp_uuid]['tags'])['q_port_id'] lr_uuid = list(self.fc._fake_lrouter_dict)[0] neutron_rtr_id = self._get_tag_dict( self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id'] if action_callback: action_callback(ls_uuid, lp_uuid, lr_uuid) # Make chunk big enough to read everything if not sp: sp = sync.SyncParameters(100) self._plugin._synchronizer._synchronize_state(sp) # Verify element is in expected status # TODO(salv-orlando): Verify status for all elements ctx = context.get_admin_context() neutron_net = 
self._plugin.get_network(ctx, neutron_net_id) neutron_port = self._plugin.get_port(ctx, neutron_port_id) neutron_rtr = self._plugin.get_router(ctx, neutron_rtr_id) self.assertEqual(exp_net_status, neutron_net['status']) self.assertEqual(exp_port_status, neutron_port['status']) self.assertEqual(exp_router_status, neutron_rtr['status']) def _action_callback_status_down(self, ls_uuid, lp_uuid, lr_uuid): self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false' self.fc._fake_lswitch_lport_dict[lp_uuid]['status'] = 'false' self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false' def test_initial_sync(self): ctx = context.get_admin_context() with self._populate_data(ctx): self._test_sync( constants.NET_STATUS_ACTIVE, constants.PORT_STATUS_ACTIVE, constants.NET_STATUS_ACTIVE) def test_initial_sync_with_resources_down(self): ctx = context.get_admin_context() with self._populate_data(ctx): self._test_sync( constants.NET_STATUS_DOWN, constants.PORT_STATUS_DOWN, constants.NET_STATUS_DOWN, self._action_callback_status_down) def test_resync_with_resources_down(self): if sys.version_info >= (3, 0): # FIXME(arosen): this does not fail with an error... self.skipTest('not supported') ctx = context.get_admin_context() with self._populate_data(ctx): sp = sync.SyncParameters(100) self._plugin._synchronizer._synchronize_state(sp) # Ensure the synchronizer performs a resync sp.init_sync_performed = True self._test_sync( constants.NET_STATUS_DOWN, constants.PORT_STATUS_DOWN, constants.NET_STATUS_DOWN, self._action_callback_status_down, sp=sp) def _action_callback_del_resource(self, ls_uuid, lp_uuid, lr_uuid): del self.fc._fake_lswitch_dict[ls_uuid] del self.fc._fake_lswitch_lport_dict[lp_uuid] del self.fc._fake_lrouter_dict[lr_uuid] def test_initial_sync_with_resources_removed(self): if sys.version_info >= (3, 0): # FIXME(arosen): this does not fail with an error... 
self.skipTest('not supported') ctx = context.get_admin_context() with self._populate_data(ctx): self._test_sync( constants.NET_STATUS_ERROR, constants.PORT_STATUS_ERROR, constants.NET_STATUS_ERROR, self._action_callback_del_resource) def test_resync_with_resources_removed(self): if sys.version_info >= (3, 0): # FIXME(arosen): this does not fail with an error... self.skipTest('not supported') ctx = context.get_admin_context() with self._populate_data(ctx): sp = sync.SyncParameters(100) self._plugin._synchronizer._synchronize_state(sp) # Ensure the synchronizer performs a resync sp.init_sync_performed = True self._test_sync( constants.NET_STATUS_ERROR, constants.PORT_STATUS_ERROR, constants.NET_STATUS_ERROR, self._action_callback_del_resource, sp=sp) def _test_sync_with_chunk_larger_maxpagesize( self, net_size, port_size, router_size, chunk_size, exp_calls): ctx = context.get_admin_context() real_func = nsxlib.get_single_query_page sp = sync.SyncParameters(chunk_size) with self._populate_data(ctx, net_size=net_size, port_size=port_size, router_size=router_size): with mock.patch.object(sync, 'MAX_PAGE_SIZE', 15): # The following mock is just for counting calls, # but we will still run the actual function with mock.patch.object( nsxlib, 'get_single_query_page', side_effect=real_func) as mock_get_page: self._test_sync( constants.NET_STATUS_ACTIVE, constants.PORT_STATUS_ACTIVE, constants.NET_STATUS_ACTIVE, sp=sp) # As each resource type does not exceed the maximum page size, # the method should be called once for each resource type self.assertEqual(exp_calls, mock_get_page.call_count) def test_sync_chunk_larger_maxpagesize_no_multiple_requests(self): # total resource size = 20 # total size for each resource does not exceed max page size (15) self._test_sync_with_chunk_larger_maxpagesize( net_size=5, port_size=2, router_size=5, chunk_size=20, exp_calls=3) def test_sync_chunk_larger_maxpagesize_triggers_multiple_requests(self): # total resource size = 48 # total size for 
each resource does exceed max page size (15) self._test_sync_with_chunk_larger_maxpagesize( net_size=16, port_size=1, router_size=16, chunk_size=48, exp_calls=6) def test_sync_multi_chunk(self): # The fake NSX API client cannot be used for this test ctx = context.get_admin_context() # Generate 4 networks, 1 port per network, and 4 routers with self._populate_data(ctx, net_size=4, port_size=1, router_size=4): fake_lswitches = jsonutils.loads( self.fc.handle_get('/ws.v1/lswitch'))['results'] fake_lrouters = jsonutils.loads( self.fc.handle_get('/ws.v1/lrouter'))['results'] fake_lswitchports = jsonutils.loads( self.fc.handle_get('/ws.v1/lswitch/*/lport'))['results'] return_values = [ # Chunk 0 - lswitches (fake_lswitches, None, 4), # Chunk 0 - lrouters (fake_lrouters[:2], 'xxx', 4), # Chunk 0 - lports (size only) ([], 'start', 4), # Chunk 1 - lrouters (2 more) (lswitches are skipped) (fake_lrouters[2:], None, None), # Chunk 1 - lports (fake_lswitchports, None, 4)] def fake_fetch_data(*args, **kwargs): return return_values.pop(0) # 2 Chunks, with 6 resources each. 
# 1st chunk lswitches and lrouters # 2nd chunk lrouters and lports # Mock _fetch_data with mock.patch.object( self._plugin._synchronizer, '_fetch_data', side_effect=fake_fetch_data): sp = sync.SyncParameters(6) def do_chunk(chunk_idx, ls_cursor, lr_cursor, lp_cursor): self._plugin._synchronizer._synchronize_state(sp) self.assertEqual(chunk_idx, sp.current_chunk) self.assertEqual(ls_cursor, sp.ls_cursor) self.assertEqual(lr_cursor, sp.lr_cursor) self.assertEqual(lp_cursor, sp.lp_cursor) # check 1st chunk do_chunk(1, None, 'xxx', 'start') # check 2nd chunk do_chunk(0, None, None, None) # Chunk size should have stayed the same self.assertEqual(sp.chunk_size, 6) def test_synchronize_network(self): ctx = context.get_admin_context() with self._populate_data(ctx): # Put a network down to verify synchronization ls_uuid = list(self.fc._fake_lswitch_dict)[0] q_net_id = self._get_tag_dict( self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id'] self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false' self._plugin.get_network(ctx, q_net_id, fields=['status']) # Reload from db q_nets = self._plugin.get_networks(ctx) for q_net in q_nets: if q_net['id'] == q_net_id: exp_status = constants.NET_STATUS_DOWN else: exp_status = constants.NET_STATUS_ACTIVE self.assertEqual(exp_status, q_net['status']) def test_synchronize_network_not_found_in_db_no_raise(self): ctx = context.get_admin_context() with self._populate_data(ctx): # Put a network down to verify synchronization ls_uuid = list(self.fc._fake_lswitch_dict)[0] q_net_id = self._get_tag_dict( self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id'] self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false' q_net_data = self._plugin._get_network(ctx, q_net_id) with mock.patch.object(self._plugin, '_get_network') as _get_network: _get_network.side_effect = n_exc.NetworkNotFound( net_id=q_net_data['id']) self._plugin._synchronizer.synchronize_network(ctx, q_net_data) def test_synchronize_network_on_get(self): 
cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC') ctx = context.get_admin_context() with self._populate_data(ctx): # Put a network down to verify punctual synchronization ls_uuid = list(self.fc._fake_lswitch_dict)[0] q_net_id = self._get_tag_dict( self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id'] self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false' q_net_data = self._plugin.get_network(ctx, q_net_id) self.assertEqual(constants.NET_STATUS_DOWN, q_net_data['status']) def test_synchronize_port_not_found_in_db_no_raise(self): ctx = context.get_admin_context() with self._populate_data(ctx): # Put a port down to verify synchronization lp_uuid = list(self.fc._fake_lswitch_lport_dict)[0] lport = self.fc._fake_lswitch_lport_dict[lp_uuid] q_port_id = self._get_tag_dict(lport['tags'])['q_port_id'] lport['status'] = 'true' q_port_data = self._plugin._get_port(ctx, q_port_id) with mock.patch.object(self._plugin, '_get_port') as _get_port: _get_port.side_effect = n_exc.PortNotFound( port_id=q_port_data['id']) self._plugin._synchronizer.synchronize_port(ctx, q_port_data) def test_synchronize_port(self): ctx = context.get_admin_context() with self._populate_data(ctx): # Put a port down to verify synchronization lp_uuid = list(self.fc._fake_lswitch_lport_dict)[0] lport = self.fc._fake_lswitch_lport_dict[lp_uuid] q_port_id = self._get_tag_dict(lport['tags'])['q_port_id'] lport['status'] = 'true' self._plugin.get_port(ctx, q_port_id, fields=['status']) # Reload from db q_ports = self._plugin.get_ports(ctx) for q_port in q_ports: if q_port['id'] == q_port_id: exp_status = constants.PORT_STATUS_ACTIVE else: exp_status = constants.PORT_STATUS_DOWN self.assertEqual(exp_status, q_port['status']) def test_synchronize_port_on_get(self): cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC') ctx = context.get_admin_context() with self._populate_data(ctx): # Put a port down to verify punctual synchronization lp_uuid = 
list(self.fc._fake_lswitch_lport_dict)[0] lport = self.fc._fake_lswitch_lport_dict[lp_uuid] q_port_id = self._get_tag_dict(lport['tags'])['q_port_id'] lport['status'] = 'false' q_port_data = self._plugin.get_port(ctx, q_port_id) self.assertEqual(constants.PORT_STATUS_DOWN, q_port_data['status']) def test_synchronize_routernot_found_in_db_no_raise(self): ctx = context.get_admin_context() with self._populate_data(ctx): # Put a router down to verify synchronization lr_uuid = list(self.fc._fake_lrouter_dict)[0] q_rtr_id = self._get_tag_dict( self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id'] self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false' q_rtr_data = self._plugin._get_router(ctx, q_rtr_id) with mock.patch.object(self._plugin, '_get_router') as _get_router: _get_router.side_effect = l3_exc.RouterNotFound( router_id=q_rtr_data['id']) self._plugin._synchronizer.synchronize_router(ctx, q_rtr_data) # TODO(asarfaty): make this test pass with the new enginefacade def skip_test_synchronize_router(self): ctx = context.get_admin_context() with self._populate_data(ctx): # Put a router down to verify synchronization lr_uuid = list(self.fc._fake_lrouter_dict)[0] q_rtr_id = self._get_tag_dict( self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id'] self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false' self._plugin.get_router(ctx, q_rtr_id, fields=['status']) # Reload from db q_routers = self._plugin.get_routers(ctx) for q_rtr in q_routers: if q_rtr['id'] == q_rtr_id: exp_status = constants.NET_STATUS_DOWN else: exp_status = constants.NET_STATUS_ACTIVE self.assertEqual(exp_status, q_rtr['status']) # TODO(asarfaty): Make this test pass with the new enginefacade def skip_test_synchronize_router_nsx_mapping_not_found(self): ctx = context.get_admin_context() with self._populate_data(ctx): # Put a router down to verify synchronization lr_uuid = list(self.fc._fake_lrouter_dict)[0] q_rtr_id = self._get_tag_dict( 
self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id'] self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false' q_rtr_data = self._plugin._get_router(ctx, q_rtr_id) # delete router mapping from db. db.delete_neutron_nsx_router_mapping(ctx.session, q_rtr_id) # pop router from fake nsx client router_data = self.fc._fake_lrouter_dict.pop(lr_uuid) self._plugin._synchronizer.synchronize_router(ctx, q_rtr_data) # Reload from db q_routers = self._plugin.get_routers(ctx) for q_rtr in q_routers: if q_rtr['id'] == q_rtr_id: exp_status = constants.NET_STATUS_ERROR else: exp_status = constants.NET_STATUS_ACTIVE self.assertEqual(exp_status, q_rtr['status']) # put the router database since we don't handle missing # router data in the fake nsx api_client self.fc._fake_lrouter_dict[lr_uuid] = router_data def test_synchronize_router_on_get(self): cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC') ctx = context.get_admin_context() with self._populate_data(ctx): # Put a router down to verify punctual synchronization lr_uuid = list(self.fc._fake_lrouter_dict)[0] q_rtr_id = self._get_tag_dict( self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id'] self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false' q_rtr_data = self._plugin.get_router(ctx, q_rtr_id) self.assertEqual(constants.NET_STATUS_DOWN, q_rtr_data['status']) def test_sync_nsx_failure_backoff(self): self.mock_api.return_value.request.side_effect = api_exc.RequestTimeout # chunk size won't matter here sp = sync.SyncParameters(999) for i in range(10): self.assertEqual( min(64, 2 ** i), self._plugin._synchronizer._synchronize_state(sp)) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/apiclient/0000775000175100017510000000000013244524600023504 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/apiclient/fake.py0000666000175100017510000007037113244523345025003 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import uuidutils import six import six.moves.urllib.parse as urlparse from vmware_nsx.api_client import exception as api_exc LOG = logging.getLogger(__name__) MAX_NAME_LEN = 40 def _validate_name(name): if name and len(name) > MAX_NAME_LEN: raise Exception("Logical switch name exceeds %d characters", MAX_NAME_LEN) def _validate_resource(body): _validate_name(body.get('display_name')) class FakeClient(object): LSWITCH_RESOURCE = 'lswitch' LPORT_RESOURCE = 'lport' LROUTER_RESOURCE = 'lrouter' NAT_RESOURCE = 'nat' LQUEUE_RESOURCE = 'lqueue' SECPROF_RESOURCE = 'securityprofile' LSWITCH_STATUS = 'lswitchstatus' LROUTER_STATUS = 'lrouterstatus' LSWITCH_LPORT_RESOURCE = 'lswitch_lport' LROUTER_LPORT_RESOURCE = 'lrouter_lport' LROUTER_NAT_RESOURCE = 'lrouter_nat' LSWITCH_LPORT_STATUS = 'lswitch_lportstatus' LSWITCH_LPORT_ATT = 'lswitch_lportattachment' LROUTER_LPORT_STATUS = 'lrouter_lportstatus' LROUTER_LPORT_ATT = 'lrouter_lportattachment' GWSERVICE_RESOURCE = 'gatewayservice' RESOURCES = [LSWITCH_RESOURCE, LROUTER_RESOURCE, LQUEUE_RESOURCE, LPORT_RESOURCE, NAT_RESOURCE, SECPROF_RESOURCE, GWSERVICE_RESOURCE] FAKE_GET_RESPONSES = { LSWITCH_RESOURCE: "fake_get_lswitch.json", LSWITCH_LPORT_RESOURCE: "fake_get_lswitch_lport.json", LSWITCH_LPORT_STATUS: "fake_get_lswitch_lport_status.json", LSWITCH_LPORT_ATT: "fake_get_lswitch_lport_att.json", 
LROUTER_RESOURCE: "fake_get_lrouter.json", LROUTER_LPORT_RESOURCE: "fake_get_lrouter_lport.json", LROUTER_LPORT_STATUS: "fake_get_lrouter_lport_status.json", LROUTER_LPORT_ATT: "fake_get_lrouter_lport_att.json", LROUTER_STATUS: "fake_get_lrouter_status.json", LROUTER_NAT_RESOURCE: "fake_get_lrouter_nat.json", SECPROF_RESOURCE: "fake_get_security_profile.json", LQUEUE_RESOURCE: "fake_get_lqueue.json", GWSERVICE_RESOURCE: "fake_get_gwservice.json" } FAKE_POST_RESPONSES = { LSWITCH_RESOURCE: "fake_post_lswitch.json", LROUTER_RESOURCE: "fake_post_lrouter.json", LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json", LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json", LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json", SECPROF_RESOURCE: "fake_post_security_profile.json", LQUEUE_RESOURCE: "fake_post_lqueue.json", GWSERVICE_RESOURCE: "fake_post_gwservice.json" } FAKE_PUT_RESPONSES = { LSWITCH_RESOURCE: "fake_post_lswitch.json", LROUTER_RESOURCE: "fake_post_lrouter.json", LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json", LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json", LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json", LSWITCH_LPORT_ATT: "fake_put_lswitch_lport_att.json", LROUTER_LPORT_ATT: "fake_put_lrouter_lport_att.json", SECPROF_RESOURCE: "fake_post_security_profile.json", LQUEUE_RESOURCE: "fake_post_lqueue.json", GWSERVICE_RESOURCE: "fake_post_gwservice.json" } MANAGED_RELATIONS = { LSWITCH_RESOURCE: [], LROUTER_RESOURCE: [], LSWITCH_LPORT_RESOURCE: ['LogicalPortAttachment'], LROUTER_LPORT_RESOURCE: ['LogicalPortAttachment'], } _validators = { LSWITCH_RESOURCE: _validate_resource, LSWITCH_LPORT_RESOURCE: _validate_resource, LROUTER_LPORT_RESOURCE: _validate_resource, SECPROF_RESOURCE: _validate_resource, LQUEUE_RESOURCE: _validate_resource, GWSERVICE_RESOURCE: _validate_resource } def __init__(self, fake_files_path): self.fake_files_path = fake_files_path self._fake_lswitch_dict = {} self._fake_lrouter_dict = {} self._fake_lswitch_lport_dict = {} 
self._fake_lrouter_lport_dict = {} self._fake_lrouter_nat_dict = {} self._fake_lswitch_lportstatus_dict = {} self._fake_lrouter_lportstatus_dict = {} self._fake_securityprofile_dict = {} self._fake_lqueue_dict = {} self._fake_gatewayservice_dict = {} def _get_tag(self, resource, scope): tags = [tag['tag'] for tag in resource['tags'] if tag['scope'] == scope] return len(tags) > 0 and tags[0] def _get_filters(self, querystring): if not querystring: return (None, None, None, None) params = urlparse.parse_qs(querystring) tag_filter = None attr_filter = None if 'tag' in params and 'tag_scope' in params: tag_filter = {'scope': params['tag_scope'][0], 'tag': params['tag'][0]} elif 'uuid' in params: attr_filter = {'uuid': params['uuid'][0]} # Handle page length and page cursor parameter page_len = params.get('_page_length') page_cursor = params.get('_page_cursor') if page_len: page_len = int(page_len[0]) else: # Explicitly set it to None (avoid 0 or empty list) page_len = None return (tag_filter, attr_filter, page_len, page_cursor) def _add_lswitch(self, body): fake_lswitch = jsonutils.loads(body) fake_lswitch['uuid'] = uuidutils.generate_uuid() self._fake_lswitch_dict[fake_lswitch['uuid']] = fake_lswitch # put the tenant_id and the zone_uuid in the main dict # for simplyfying templating zone_uuid = fake_lswitch['transport_zones'][0]['zone_uuid'] fake_lswitch['zone_uuid'] = zone_uuid fake_lswitch['tenant_id'] = self._get_tag(fake_lswitch, 'os_tid') fake_lswitch['lport_count'] = 0 # set status value fake_lswitch['status'] = 'true' return fake_lswitch def _build_lrouter(self, body, uuid=None): fake_lrouter = jsonutils.loads(body) if uuid: fake_lrouter['uuid'] = uuid fake_lrouter['tenant_id'] = self._get_tag(fake_lrouter, 'os_tid') default_nexthop = fake_lrouter['routing_config'].get( 'default_route_next_hop') if default_nexthop: fake_lrouter['default_next_hop'] = default_nexthop.get( 'gateway_ip_address', '0.0.0.0') else: fake_lrouter['default_next_hop'] = '0.0.0.0' # 
NOTE(salv-orlando): We won't make the Fake NSX API client # aware of NSX version. The long term plan is to replace it # with behavioral mocking of NSX API requests if 'distributed' not in fake_lrouter: fake_lrouter['distributed'] = False distributed_json = ('"distributed": %s,' % str(fake_lrouter['distributed']).lower()) fake_lrouter['distributed_json'] = distributed_json return fake_lrouter def _add_lrouter(self, body): fake_lrouter = self._build_lrouter(body, uuidutils.generate_uuid()) self._fake_lrouter_dict[fake_lrouter['uuid']] = fake_lrouter fake_lrouter['lport_count'] = 0 # set status value fake_lrouter['status'] = 'true' return fake_lrouter def _add_lqueue(self, body): fake_lqueue = jsonutils.loads(body) fake_lqueue['uuid'] = uuidutils.generate_uuid() self._fake_lqueue_dict[fake_lqueue['uuid']] = fake_lqueue return fake_lqueue def _add_lswitch_lport(self, body, ls_uuid): fake_lport = jsonutils.loads(body) new_uuid = uuidutils.generate_uuid() fake_lport['uuid'] = new_uuid # put the tenant_id and the ls_uuid in the main dict # for simplyfying templating fake_lport['ls_uuid'] = ls_uuid fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid') fake_lport['neutron_port_id'] = self._get_tag(fake_lport, 'q_port_id') fake_lport['neutron_device_id'] = self._get_tag(fake_lport, 'vm_id') fake_lport['att_type'] = "NoAttachment" fake_lport['att_info_json'] = '' self._fake_lswitch_lport_dict[fake_lport['uuid']] = fake_lport fake_lswitch = self._fake_lswitch_dict[ls_uuid] fake_lswitch['lport_count'] += 1 fake_lport_status = fake_lport.copy() fake_lport_status['ls_tenant_id'] = fake_lswitch['tenant_id'] fake_lport_status['ls_uuid'] = fake_lswitch['uuid'] fake_lport_status['ls_name'] = fake_lswitch['display_name'] fake_lport_status['ls_zone_uuid'] = fake_lswitch['zone_uuid'] # set status value fake_lport['status'] = 'true' self._fake_lswitch_lportstatus_dict[new_uuid] = fake_lport_status return fake_lport def _build_lrouter_lport(self, body, new_uuid=None, 
lr_uuid=None): fake_lport = jsonutils.loads(body) if new_uuid: fake_lport['uuid'] = new_uuid # put the tenant_id and the le_uuid in the main dict # for simplyfying templating if lr_uuid: fake_lport['lr_uuid'] = lr_uuid fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid') fake_lport['neutron_port_id'] = self._get_tag(fake_lport, 'q_port_id') # replace ip_address with its json dump if 'ip_addresses' in fake_lport: ip_addresses_json = jsonutils.dumps(fake_lport['ip_addresses']) fake_lport['ip_addresses_json'] = ip_addresses_json return fake_lport def _add_lrouter_lport(self, body, lr_uuid): new_uuid = uuidutils.generate_uuid() fake_lport = self._build_lrouter_lport(body, new_uuid, lr_uuid) self._fake_lrouter_lport_dict[fake_lport['uuid']] = fake_lport try: fake_lrouter = self._fake_lrouter_dict[lr_uuid] except KeyError: raise api_exc.ResourceNotFound() fake_lrouter['lport_count'] += 1 fake_lport_status = fake_lport.copy() fake_lport_status['lr_tenant_id'] = fake_lrouter['tenant_id'] fake_lport_status['lr_uuid'] = fake_lrouter['uuid'] fake_lport_status['lr_name'] = fake_lrouter['display_name'] self._fake_lrouter_lportstatus_dict[new_uuid] = fake_lport_status return fake_lport def _add_securityprofile(self, body): fake_securityprofile = jsonutils.loads(body) fake_securityprofile['uuid'] = uuidutils.generate_uuid() fake_securityprofile['tenant_id'] = self._get_tag( fake_securityprofile, 'os_tid') fake_securityprofile['nova_spid'] = self._get_tag(fake_securityprofile, 'nova_spid') self._fake_securityprofile_dict[fake_securityprofile['uuid']] = ( fake_securityprofile) return fake_securityprofile def _add_lrouter_nat(self, body, lr_uuid): fake_nat = jsonutils.loads(body) new_uuid = uuidutils.generate_uuid() fake_nat['uuid'] = new_uuid fake_nat['lr_uuid'] = lr_uuid self._fake_lrouter_nat_dict[fake_nat['uuid']] = fake_nat if 'match' in fake_nat: match_json = jsonutils.dumps(fake_nat['match']) fake_nat['match_json'] = match_json return fake_nat def 
_add_gatewayservice(self, body): fake_gwservice = jsonutils.loads(body) fake_gwservice['uuid'] = str(uuidutils.generate_uuid()) fake_gwservice['tenant_id'] = self._get_tag( fake_gwservice, 'os_tid') # FIXME(salvatore-orlando): For simplicity we're managing only a # single device. Extend the fake client for supporting multiple devices first_gw = fake_gwservice['gateways'][0] fake_gwservice['transport_node_uuid'] = first_gw['transport_node_uuid'] fake_gwservice['device_id'] = first_gw['device_id'] self._fake_gatewayservice_dict[fake_gwservice['uuid']] = ( fake_gwservice) return fake_gwservice def _build_relation(self, src, dst, resource_type, relation): if relation not in self.MANAGED_RELATIONS[resource_type]: return # Relation is not desired in output if '_relations' not in src or not src['_relations'].get(relation): return # Item does not have relation relation_data = src['_relations'].get(relation) dst_relations = dst.get('_relations', {}) dst_relations[relation] = relation_data dst['_relations'] = dst_relations def _fill_attachment(self, att_data, ls_uuid=None, lr_uuid=None, lp_uuid=None): new_data = att_data.copy() for k in ('ls_uuid', 'lr_uuid', 'lp_uuid'): if locals().get(k): new_data[k] = locals()[k] def populate_field(field_name): if field_name in att_data: new_data['%s_field' % field_name] = ('"%s" : "%s",' % (field_name, att_data[field_name])) del new_data[field_name] else: new_data['%s_field' % field_name] = "" for field in ['vif_uuid', 'peer_port_href', 'vlan_id', 'peer_port_uuid', 'l3_gateway_service_uuid']: populate_field(field) return new_data def _get_resource_type(self, path): """Get resource type. 
Identifies resource type and relevant uuids in the uri /ws.v1/lswitch/xxx /ws.v1/lswitch/xxx/status /ws.v1/lswitch/xxx/lport/yyy /ws.v1/lswitch/xxx/lport/yyy/status /ws.v1/lrouter/zzz /ws.v1/lrouter/zzz/status /ws.v1/lrouter/zzz/lport/www /ws.v1/lrouter/zzz/lport/www/status /ws.v1/lqueue/xxx """ # The first element will always be 'ws.v1' - so we just discard it uri_split = path.split('/')[1:] # parse uri_split backwards suffix = "" idx = len(uri_split) - 1 if 'status' in uri_split[idx]: suffix = "status" idx = idx - 1 elif 'attachment' in uri_split[idx]: suffix = "attachment" idx = idx - 1 # then check if we have an uuid uuids = [] if uri_split[idx].replace('-', '') not in self.RESOURCES: uuids.append(uri_split[idx]) idx = idx - 1 resource_type = "%s%s" % (uri_split[idx], suffix) if idx > 1: uuids.insert(0, uri_split[idx - 1]) resource_type = "%s_%s" % (uri_split[idx - 2], resource_type) return (resource_type.replace('-', ''), uuids) def _list(self, resource_type, response_file, parent_uuid=None, query=None, relations=None): (tag_filter, attr_filter, page_len, page_cursor) = self._get_filters(query) # result_count attribute in response should appear only when # page_cursor is not specified do_result_count = not page_cursor with open("%s/%s" % (self.fake_files_path, response_file)) as f: response_template = f.read() res_dict = getattr(self, '_fake_%s_dict' % resource_type) if parent_uuid == '*': parent_uuid = None # NSX raises ResourceNotFound if lswitch doesn't exist and is not * elif not res_dict and resource_type == self.LSWITCH_LPORT_RESOURCE: raise api_exc.ResourceNotFound() def _attr_match(res_uuid): if not attr_filter: return True item = res_dict[res_uuid] for (attr, value) in six.iteritems(attr_filter): if item.get(attr) != value: return False return True def _tag_match(res_uuid): if not tag_filter: return True return any([x['scope'] == tag_filter['scope'] and x['tag'] == tag_filter['tag'] for x in res_dict[res_uuid]['tags']]) def _lswitch_match(res_uuid): # 
verify that the switch exist if parent_uuid and parent_uuid not in self._fake_lswitch_dict: raise Exception(_("lswitch:%s not found") % parent_uuid) if (not parent_uuid or res_dict[res_uuid].get('ls_uuid') == parent_uuid): return True return False def _lrouter_match(res_uuid): # verify that the router exist if parent_uuid and parent_uuid not in self._fake_lrouter_dict: raise api_exc.ResourceNotFound() if (not parent_uuid or res_dict[res_uuid].get('lr_uuid') == parent_uuid): return True return False def _cursor_match(res_uuid, page_cursor): if not page_cursor: return True if page_cursor == res_uuid: # always return True once page_cursor has been found page_cursor = None return True return False def _build_item(resource): item = jsonutils.loads(response_template % resource) if relations: for relation in relations: self._build_relation(resource, item, resource_type, relation) return item for item in res_dict.values(): if 'tags' in item: item['tags_json'] = jsonutils.dumps(item['tags']) if resource_type in (self.LSWITCH_LPORT_RESOURCE, self.LSWITCH_LPORT_ATT, self.LSWITCH_LPORT_STATUS): parent_func = _lswitch_match elif resource_type in (self.LROUTER_LPORT_RESOURCE, self.LROUTER_LPORT_ATT, self.LROUTER_NAT_RESOURCE, self.LROUTER_LPORT_STATUS): parent_func = _lrouter_match else: parent_func = lambda x: True items = [_build_item(res_dict[res_uuid]) for res_uuid in res_dict if (parent_func(res_uuid) and _tag_match(res_uuid) and _attr_match(res_uuid) and _cursor_match(res_uuid, page_cursor))] # Rather inefficient, but hey this is just a mock! 
next_cursor = None total_items = len(items) if page_len: try: next_cursor = items[page_len]['uuid'] except IndexError: next_cursor = None items = items[:page_len] response_dict = {'results': items} if next_cursor: response_dict['page_cursor'] = next_cursor if do_result_count: response_dict['result_count'] = total_items return jsonutils.dumps(response_dict) def _show(self, resource_type, response_file, uuid1, uuid2=None, relations=None): target_uuid = uuid2 or uuid1 if resource_type.endswith('attachment'): resource_type = resource_type[:resource_type.index('attachment')] with open("%s/%s" % (self.fake_files_path, response_file)) as f: response_template = f.read() res_dict = getattr(self, '_fake_%s_dict' % resource_type) for item in res_dict.values(): if 'tags' in item: item['tags_json'] = jsonutils.dumps(item['tags']) # replace sec prof rules with their json dump def jsonify_rules(rule_key): if rule_key in item: rules_json = jsonutils.dumps(item[rule_key]) item['%s_json' % rule_key] = rules_json jsonify_rules('logical_port_egress_rules') jsonify_rules('logical_port_ingress_rules') items = [jsonutils.loads(response_template % res_dict[res_uuid]) for res_uuid in res_dict if res_uuid == target_uuid] if items: return jsonutils.dumps(items[0]) raise api_exc.ResourceNotFound() def handle_get(self, url): #TODO(salvatore-orlando): handle field selection parsedurl = urlparse.urlparse(url) (res_type, uuids) = self._get_resource_type(parsedurl.path) relations = urlparse.parse_qs(parsedurl.query).get('relations') response_file = self.FAKE_GET_RESPONSES.get(res_type) if not response_file: raise api_exc.NsxApiException() if 'lport' in res_type or 'nat' in res_type: if len(uuids) > 1: return self._show(res_type, response_file, uuids[0], uuids[1], relations=relations) else: return self._list(res_type, response_file, uuids[0], query=parsedurl.query, relations=relations) elif ('lswitch' in res_type or 'lrouter' in res_type or self.SECPROF_RESOURCE in res_type or self.LQUEUE_RESOURCE 
in res_type or 'gatewayservice' in res_type): LOG.debug("UUIDS:%s", uuids) if uuids: return self._show(res_type, response_file, uuids[0], relations=relations) else: return self._list(res_type, response_file, query=parsedurl.query, relations=relations) else: raise Exception("unknown resource:%s" % res_type) def handle_post(self, url, body): parsedurl = urlparse.urlparse(url) (res_type, uuids) = self._get_resource_type(parsedurl.path) response_file = self.FAKE_POST_RESPONSES.get(res_type) if not response_file: raise Exception("resource not found") with open("%s/%s" % (self.fake_files_path, response_file)) as f: response_template = f.read() add_resource = getattr(self, '_add_%s' % res_type) body_json = jsonutils.loads(body) val_func = self._validators.get(res_type) if val_func: val_func(body_json) args = [body] if uuids: args.append(uuids[0]) response = response_template % add_resource(*args) return response def handle_put(self, url, body): parsedurl = urlparse.urlparse(url) (res_type, uuids) = self._get_resource_type(parsedurl.path) response_file = self.FAKE_PUT_RESPONSES.get(res_type) if not response_file: raise Exception("resource not found") with open("%s/%s" % (self.fake_files_path, response_file)) as f: response_template = f.read() # Manage attachment operations is_attachment = False if res_type.endswith('attachment'): is_attachment = True res_type = res_type[:res_type.index('attachment')] res_dict = getattr(self, '_fake_%s_dict' % res_type) body_json = jsonutils.loads(body) val_func = self._validators.get(res_type) if val_func: val_func(body_json) try: resource = res_dict[uuids[-1]] except KeyError: raise api_exc.ResourceNotFound() if not is_attachment: edit_resource = getattr(self, '_build_%s' % res_type, None) if edit_resource: body_json = edit_resource(body) resource.update(body_json) else: relations = resource.get("_relations", {}) body_2 = jsonutils.loads(body) resource['att_type'] = body_2['type'] relations['LogicalPortAttachment'] = body_2 
resource['_relations'] = relations if body_2['type'] == "PatchAttachment": # We need to do a trick here if self.LROUTER_RESOURCE in res_type: res_type_2 = res_type.replace(self.LROUTER_RESOURCE, self.LSWITCH_RESOURCE) elif self.LSWITCH_RESOURCE in res_type: res_type_2 = res_type.replace(self.LSWITCH_RESOURCE, self.LROUTER_RESOURCE) res_dict_2 = getattr(self, '_fake_%s_dict' % res_type_2) body_2['peer_port_uuid'] = uuids[-1] resource_2 = \ res_dict_2[jsonutils.loads(body)['peer_port_uuid']] relations_2 = resource_2.get("_relations") if not relations_2: relations_2 = {} relations_2['LogicalPortAttachment'] = body_2 resource_2['_relations'] = relations_2 resource['peer_port_uuid'] = body_2['peer_port_uuid'] resource['att_info_json'] = ( "\"peer_port_uuid\": \"%s\"," % resource_2['uuid']) resource_2['att_info_json'] = ( "\"peer_port_uuid\": \"%s\"," % body_2['peer_port_uuid']) elif body_2['type'] == "L3GatewayAttachment": resource['attachment_gwsvc_uuid'] = ( body_2['l3_gateway_service_uuid']) resource['vlan_id'] = body_2.get('vlan_id') elif body_2['type'] == "L2GatewayAttachment": resource['attachment_gwsvc_uuid'] = ( body_2['l2_gateway_service_uuid']) elif body_2['type'] == "VifAttachment": resource['vif_uuid'] = body_2['vif_uuid'] resource['att_info_json'] = ( "\"vif_uuid\": \"%s\"," % body_2['vif_uuid']) if not is_attachment: response = response_template % resource else: if res_type == self.LROUTER_LPORT_RESOURCE: lr_uuid = uuids[0] ls_uuid = None elif res_type == self.LSWITCH_LPORT_RESOURCE: ls_uuid = uuids[0] lr_uuid = None lp_uuid = uuids[1] response = response_template % self._fill_attachment( jsonutils.loads(body), ls_uuid, lr_uuid, lp_uuid) return response def handle_delete(self, url): parsedurl = urlparse.urlparse(url) (res_type, uuids) = self._get_resource_type(parsedurl.path) response_file = self.FAKE_PUT_RESPONSES.get(res_type) if not response_file: raise Exception("resource not found") res_dict = getattr(self, '_fake_%s_dict' % res_type) try: del 
res_dict[uuids[-1]] except KeyError: raise api_exc.ResourceNotFound() return "" def fake_request(self, *args, **kwargs): method = args[0] handler = getattr(self, "handle_%s" % method.lower()) return handler(*args[1:]) def reset_all(self): self._fake_lswitch_dict.clear() self._fake_lrouter_dict.clear() self._fake_lswitch_lport_dict.clear() self._fake_lrouter_lport_dict.clear() self._fake_lswitch_lportstatus_dict.clear() self._fake_lrouter_lportstatus_dict.clear() self._fake_lqueue_dict.clear() self._fake_securityprofile_dict.clear() self._fake_gatewayservice_dict.clear() vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/apiclient/test_api_eventlet_request.py0000666000175100017510000003045313244523345031360 0ustar zuulzuul00000000000000# Copyright (C) 2009-2012 VMware, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import random import urllib import eventlet import mock from neutron.tests import base from oslo_log import log as logging from six.moves import http_client as httplib from vmware_nsx.api_client import ( eventlet_client as client) from vmware_nsx.api_client import ( eventlet_request as request) from vmware_nsx.tests import unit as vmware LOG = logging.getLogger(__name__) REQUEST_TIMEOUT = 1 def fetch(url): return urllib.urlopen(url).read() class ApiRequestEventletTest(base.BaseTestCase): def setUp(self): super(ApiRequestEventletTest, self).setUp() self.client = client.EventletApiClient( [("127.0.0.1", 4401, True)], "admin", "admin") self.url = "/ws.v1/_debug" self.req = request.EventletApiRequest(self.client, self.url) def tearDown(self): self.client = None self.req = None super(ApiRequestEventletTest, self).tearDown() def test_construct_eventlet_api_request(self): e = request.EventletApiRequest(self.client, self.url) self.assertIsNotNone(e) def test_apirequest_spawn(self): def x(id): eventlet.greenthread.sleep(random.random()) LOG.info('spawned: %d', id) for i in range(10): request.EventletApiRequest._spawn(x, i) def test_apirequest_start(self): for i in range(10): a = request.EventletApiRequest( self.client, self.url) a._handle_request = mock.Mock() a.start() eventlet.greenthread.sleep(0.1) LOG.info('_handle_request called: %s', a._handle_request.called) request.EventletApiRequest.joinall() def test_join_with_handle_request(self): self.req._handle_request = mock.Mock() self.req.start() self.req.join() self.assertTrue(self.req._handle_request.called) def test_join_without_handle_request(self): self.req._handle_request = mock.Mock() self.req.join() self.assertFalse(self.req._handle_request.called) def test_copy(self): req = self.req.copy() for att in [ '_api_client', '_url', '_method', '_body', '_headers', '_http_timeout', '_request_timeout', '_retries', '_redirects', '_auto_login']: self.assertTrue(getattr(req, att) is getattr(self.req, att)) def 
test_request_error(self): self.assertIsNone(self.req.request_error) def test_run_and_handle_request(self): self.req._request_timeout = None self.req._handle_request = mock.Mock() self.req.start() self.req.join() self.assertTrue(self.req._handle_request.called) def test_run_and_timeout(self): def my_handle_request(): LOG.info('my_handle_request() self: %s', self.req) LOG.info('my_handle_request() dir(self): %s', dir(self.req)) eventlet.greenthread.sleep(REQUEST_TIMEOUT * 2) with mock.patch.object( self.req, '_handle_request', new=my_handle_request ): self.req._request_timeout = REQUEST_TIMEOUT self.req.start() self.assertIsNone(self.req.join()) def prep_issue_request(self): mysock = mock.Mock() mysock.gettimeout.return_value = 4242 myresponse = mock.Mock() myresponse.read.return_value = 'body' myresponse.getheaders.return_value = 'headers' myresponse.status = httplib.MOVED_PERMANENTLY myconn = mock.Mock() myconn.request.return_value = None myconn.sock = mysock myconn.getresponse.return_value = myresponse myconn.__str__ = mock.Mock() myconn.__str__.return_value = 'myconn string' req = self.req req._redirect_params = mock.Mock() req._redirect_params.return_value = (myconn, 'url') req._request_str = mock.Mock() req._request_str.return_value = 'http://cool/cool' client = self.client client.need_login = False client._auto_login = False client._auth_cookie = False client.acquire_connection = mock.Mock() client.acquire_connection.return_value = myconn client.release_connection = mock.Mock() return (mysock, myresponse, myconn) def test_issue_request_trigger_exception(self): (mysock, myresponse, myconn) = self.prep_issue_request() self.client.acquire_connection.return_value = None self.req._issue_request() self.assertIsInstance(self.req._request_error, Exception) self.assertTrue(self.client.acquire_connection.called) def test_issue_request_handle_none_sock(self): (mysock, myresponse, myconn) = self.prep_issue_request() myconn.sock = None self.req.start() 
self.assertIsNone(self.req.join()) self.assertTrue(self.client.acquire_connection.called) def test_issue_request_exceed_maximum_retries(self): (mysock, myresponse, myconn) = self.prep_issue_request() self.req.start() self.assertIsNone(self.req.join()) self.assertTrue(self.client.acquire_connection.called) def test_issue_request_trigger_non_redirect(self): (mysock, myresponse, myconn) = self.prep_issue_request() myresponse.status = httplib.OK self.req.start() self.assertIsNone(self.req.join()) self.assertTrue(self.client.acquire_connection.called) def test_issue_request_trigger_internal_server_error(self): (mysock, myresponse, myconn) = self.prep_issue_request() self.req._redirect_params.return_value = (myconn, None) self.req.start() self.assertIsNone(self.req.join()) self.assertTrue(self.client.acquire_connection.called) def test_redirect_params_break_on_location(self): myconn = mock.Mock() (conn, retval) = self.req._redirect_params( myconn, [('location', None)]) self.assertIsNone(retval) def test_redirect_params_parse_a_url(self): myconn = mock.Mock() (conn, retval) = self.req._redirect_params( myconn, [('location', '/path/a/b/c')]) self.assertIsNotNone(retval) def test_redirect_params_invalid_redirect_location(self): myconn = mock.Mock() (conn, retval) = self.req._redirect_params( myconn, [('location', '+path/a/b/c')]) self.assertIsNone(retval) def test_redirect_params_invalid_scheme(self): myconn = mock.Mock() (conn, retval) = self.req._redirect_params( myconn, [('location', 'invalidscheme://hostname:1/path')]) self.assertIsNone(retval) def test_redirect_params_setup_https_with_cooki(self): with mock.patch(vmware.CLIENT_NAME) as mock_client: api_client = mock_client.return_value self.req._api_client = api_client myconn = mock.Mock() (conn, retval) = self.req._redirect_params( myconn, [('location', 'https://host:1/path')]) self.assertIsNotNone(retval) self.assertTrue(api_client.acquire_redirect_connection.called) def 
test_redirect_params_setup_htttps_and_query(self): with mock.patch(vmware.CLIENT_NAME) as mock_client: api_client = mock_client.return_value self.req._api_client = api_client myconn = mock.Mock() (conn, retval) = self.req._redirect_params(myconn, [ ('location', 'https://host:1/path?q=1')]) self.assertIsNotNone(retval) self.assertTrue(api_client.acquire_redirect_connection.called) def test_redirect_params_setup_https_connection_no_cookie(self): with mock.patch(vmware.CLIENT_NAME) as mock_client: api_client = mock_client.return_value self.req._api_client = api_client myconn = mock.Mock() (conn, retval) = self.req._redirect_params(myconn, [ ('location', 'https://host:1/path')]) self.assertIsNotNone(retval) self.assertTrue(api_client.acquire_redirect_connection.called) def test_redirect_params_setup_https_and_query_no_cookie(self): with mock.patch(vmware.CLIENT_NAME) as mock_client: api_client = mock_client.return_value self.req._api_client = api_client myconn = mock.Mock() (conn, retval) = self.req._redirect_params( myconn, [('location', 'https://host:1/path?q=1')]) self.assertIsNotNone(retval) self.assertTrue(api_client.acquire_redirect_connection.called) def test_redirect_params_path_only_with_query(self): with mock.patch(vmware.CLIENT_NAME) as mock_client: api_client = mock_client.return_value api_client.wait_for_login.return_value = None api_client.auth_cookie = None api_client.acquire_connection.return_value = True myconn = mock.Mock() (conn, retval) = self.req._redirect_params(myconn, [ ('location', '/path?q=1')]) self.assertIsNotNone(retval) def test_handle_request_auto_login(self): self.req._auto_login = True self.req._api_client = mock.Mock() self.req._api_client.need_login = True self.req._request_str = mock.Mock() self.req._request_str.return_value = 'http://cool/cool' self.req.spawn = mock.Mock() self.req._handle_request() def test_handle_request_auto_login_unauth(self): self.req._auto_login = True self.req._api_client = mock.Mock() 
self.req._api_client.need_login = True self.req._request_str = mock.Mock() self.req._request_str.return_value = 'http://cool/cool' import socket resp = httplib.HTTPResponse(socket.socket()) resp.status = httplib.UNAUTHORIZED mywaiter = mock.Mock() mywaiter.wait = mock.Mock() mywaiter.wait.return_value = resp self.req.spawn = mock.Mock(return_value=mywaiter) self.req._handle_request() def test_construct_eventlet_login_request(self): r = request.LoginRequestEventlet(self.client, 'user', 'password') self.assertIsNotNone(r) def test_session_cookie_session_cookie_retrieval(self): r = request.LoginRequestEventlet(self.client, 'user', 'password') r.successful = mock.Mock() r.successful.return_value = True r.value = mock.Mock() r.value.get_header = mock.Mock() r.value.get_header.return_value = 'cool' self.assertIsNotNone(r.session_cookie()) def test_session_cookie_not_retrieved(self): r = request.LoginRequestEventlet(self.client, 'user', 'password') r.successful = mock.Mock() r.successful.return_value = False r.value = mock.Mock() r.value.get_header = mock.Mock() r.value.get_header.return_value = 'cool' self.assertIsNone(r.session_cookie()) def test_construct_eventlet_get_api_providers_request(self): r = request.GetApiProvidersRequestEventlet(self.client) self.assertIsNotNone(r) def test_api_providers_none_api_providers(self): r = request.GetApiProvidersRequestEventlet(self.client) r.successful = mock.Mock(return_value=False) self.assertIsNone(r.api_providers()) def test_api_providers_non_none_api_providers(self): r = request.GetApiProvidersRequestEventlet(self.client) r.value = mock.Mock() r.value.body = """{ "results": [ { "roles": [ { "role": "api_provider", "listen_addr": "pssl:1.1.1.1:1" }]}]}""" r.successful = mock.Mock(return_value=True) LOG.info('%s', r.api_providers()) self.assertIsNotNone(r.api_providers()) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/apiclient/__init__.py0000666000175100017510000000000013244523345025612 0ustar 
zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_mh/apiclient/test_api_common.py0000666000175100017510000000240713244523345027250 0ustar zuulzuul00000000000000# Copyright 2011 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.tests import base from six.moves import http_client as httplib from vmware_nsx import api_client class ApiCommonTest(base.BaseTestCase): def test_ctrl_conn_to_str(self): conn = httplib.HTTPSConnection('localhost', 4242, timeout=0) self.assertTrue( api_client.ctrl_conn_to_str(conn) == 'https://localhost:4242') conn = httplib.HTTPConnection('localhost', 4242, timeout=0) self.assertTrue( api_client.ctrl_conn_to_str(conn) == 'http://localhost:4242') self.assertRaises(TypeError, api_client.ctrl_conn_to_str, ('not an httplib.HTTPSConnection')) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/0000775000175100017510000000000013244524600021375 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/test_lbaas_common.py0000666000175100017510000001211513244523345025447 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.tests import base from vmware_nsx.plugins.nsx_v.vshield import vcns_driver from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common EDGE_ID = 'edge-x' POOL_ID = 'b3dfb476-6fdf-4ddd-b6bd-e86ae78dc30b' def firewall_section_maker(if_ip_list, vip_ip_list): return ( '
' + POOL_ID + 'allow' 'Ipv4Address' + ','.join(if_ip_list) + '' 'Ipv4Address' + ','.join(vip_ip_list) + '' '
') def if_maker(ip_list): intf = { 'index': 1, 'name': 'internal1', 'addressGroups': { 'addressGroups': [ {'subnetPrefixLength': '24', 'secondaryAddresses': { 'ipAddress': ip_list, 'type': 'secondary_addresses'}, 'primaryAddress': '10.0.0.1', 'subnetMask': '255.255.255.0'}]}, 'portgroupName': 'pg1234', 'label': 'vNic_1', 'type': 'internal', 'portgroupId': 'virtualwire-31'} return intf def if_list_maker(ip_list): if_list = { 'vnics': [ {'index': 0, 'name': 'external', 'addressGroups': { 'addressGroups': [ {'subnetMask': '255.255.255.0', 'primaryAddress': '172.24.4.2', 'subnetPrefixLength': '24'}]}, 'portgroupName': 'VM Network', 'label': 'vNic_0', 'type': 'uplink', 'portgroupId': 'network-13'}, {'index': 1, 'name': 'internal1', 'addressGroups': { 'addressGroups': [ {'subnetPrefixLength': '24', 'secondaryAddresses': { 'ipAddress': ip_list, 'type': 'secondary_addresses'}, 'primaryAddress': '10.0.0.1', 'subnetMask': '255.255.255.0'}]}, 'portgroupName': 'pg1234', 'label': 'vNic_1', 'type': 'internal', 'portgroupId': 'virtualwire-31'}, {'index': 2, 'name': 'vnic2', 'addressGroups': {'addressGroups': []}, 'label': 'vNic_2', 'type': 'internal'}, {'index': 3, 'name': 'vnic3', 'addressGroups': {'addressGroups': []}, 'label': 'vNic_3', 'type': 'internal'}]} return if_list class TestLbaasCommon(base.BaseTestCase): def setUp(self): super(TestLbaasCommon, self).setUp() callbacks = mock.Mock() callbacks.plugin = mock.Mock() self.edge_driver = vcns_driver.VcnsDriver(callbacks) self.edge_driver._lb_driver_prop = mock.Mock() def _mock_edge_driver_vcns(self, attr): return mock.patch.object(self.edge_driver.vcns, attr) def test_add_vip_as_secondary_ip(self): update_if = if_maker(['10.0.0.6', '10.0.0.8']) with self._mock_edge_driver_vcns('get_interfaces') as mock_get_if,\ self._mock_edge_driver_vcns( 'update_interface') as mock_update_if: mock_get_if.return_value = (None, if_list_maker(['10.0.0.6'])) lb_common.add_vip_as_secondary_ip( self.edge_driver.vcns, EDGE_ID, '10.0.0.8') 
mock_update_if.assert_called_with(EDGE_ID, update_if) def test_del_vip_as_secondary_ip(self): update_if = if_maker(['10.0.0.6']) with self._mock_edge_driver_vcns('get_interfaces') as mock_get_if,\ self._mock_edge_driver_vcns( 'update_interface') as mock_update_if: mock_get_if.return_value = (None, if_list_maker(['10.0.0.6', '10.0.0.8'])) lb_common.del_vip_as_secondary_ip( self.edge_driver.vcns, EDGE_ID, '10.0.0.8') mock_update_if.assert_called_with(EDGE_ID, update_if) def test_get_edge_ip_addresses(self): get_if_list = if_list_maker(['10.0.0.6']) with mock.patch.object(self.edge_driver.vcns, 'get_interfaces', return_value=(None, get_if_list)): ip_list = lb_common.get_edge_ip_addresses(self.edge_driver.vcns, EDGE_ID) self.assertEqual(['172.24.4.2', '10.0.0.1'], ip_list) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/test_misc.py0000666000175100017510000000632713244523345023760 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron.tests import base from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.plugins.nsx_v.vshield import vcns def raise_until_attempt(attempt, exception): def raises_until(): if raises_until.current_attempt < attempt: raises_until.current_attempt += 1 raise exception else: return raises_until.current_attempt raises_until.current_attempt = 1 return raises_until class TestMisc(base.BaseTestCase): response = """
Dummy
1 core-services
""" def test_retry_on_exception_one_attempt(self): success_on_first_attempt = raise_until_attempt( 1, exceptions.RequestBad(uri='', response='')) should_return_one = vcns.retry_upon_exception( exceptions.RequestBad, max_attempts=1)(success_on_first_attempt) self.assertEqual(1, should_return_one()) def test_retry_on_exception_five_attempts(self): success_on_fifth_attempt = raise_until_attempt( 5, exceptions.RequestBad(uri='', response='')) should_return_five = vcns.retry_upon_exception( exceptions.RequestBad, max_attempts=10)(success_on_fifth_attempt) self.assertEqual(5, should_return_five()) def test_retry_on_exception_exceed_attempts(self): success_on_fifth_attempt = raise_until_attempt( 5, exceptions.RequestBad(uri='', response='')) should_raise = vcns.retry_upon_exception( exceptions.RequestBad, max_attempts=4)(success_on_fifth_attempt) self.assertRaises(exceptions.RequestBad, should_raise) def test_retry_on_exception_exclude_error_codes_retry(self): success_on_fifth_attempt = raise_until_attempt( 5, exceptions.RequestBad(uri='', response=self.response)) # excluding another error code, so should retry should_return_five = vcns.retry_upon_exception_exclude_error_codes( exceptions.RequestBad, [2], max_attempts=10)(success_on_fifth_attempt) self.assertEqual(5, should_return_five()) def test_retry_on_exception_exclude_error_codes_raise(self): success_on_fifth_attempt = raise_until_attempt( 5, exceptions.RequestBad(uri='', response=self.response)) # excluding the returned error code, so no retries are expected should_raise = vcns.retry_upon_exception_exclude_error_codes( exceptions.RequestBad, [1], max_attempts=10)(success_on_fifth_attempt) self.assertRaises(exceptions.RequestBad, should_raise) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/vshield/0000775000175100017510000000000013244524600023033 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/vshield/__init__.py0000666000175100017510000000000013244523345025141 0ustar 
zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/vshield/fake_vcns.py0000666000175100017510000016312613244523345025364 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import xml.etree.ElementTree as ET import netaddr from oslo_serialization import jsonutils from oslo_utils import uuidutils import six from vmware_nsx._i18n import _ from vmware_nsx.plugins.nsx_v.vshield.common import constants from vmware_nsx.plugins.nsx_v.vshield.common import exceptions SECTION_LOCATION_HEADER = '/api/4.0/firewall/globalroot-0/config/%s/%s' class FakeVcns(object): errors = { 303: exceptions.ResourceRedirect, 400: exceptions.RequestBad, 403: exceptions.Forbidden, 404: exceptions.ResourceNotFound, 415: exceptions.MediaTypeUnsupport, 503: exceptions.ServiceUnavailable } def __init__(self, unique_router_name=True): self._jobs = {} self._job_idx = 0 self._edges = {} self._edge_idx = 0 self._lswitches = {} self._unique_router_name = unique_router_name self._fake_nsx_api = None self.fake_firewall_dict = {} self.temp_firewall = { "firewallRules": { "firewallRules": [] } } self.fake_ipsecvpn_dict = {} self.temp_ipsecvpn = { 'featureType': "ipsec_4.0", 'enabled': True, 'sites': {'sites': []}} self._fake_virtualservers_dict = {} self._fake_pools_dict = {} self._fake_monitors_dict = {} self._fake_app_profiles_dict = {} self._fake_loadbalancer_config = {} self._fake_virtual_wires = {} self._virtual_wire_id = 0 
self._fake_portgroups = {} self._portgroup_id = 0 self._securitygroups = {'ids': 0, 'names': set()} self._sections = {'section_ids': 0, 'rule_ids': 0, 'names': set()} self._dhcp_bindings = {} self._spoofguard_policies = [] self._ipam_pools = {} def do_request(self, method, uri, params=None, format='json', **kwargs): pass def set_fake_nsx_api(self, fake_nsx_api): self._fake_nsx_api = fake_nsx_api def _validate_edge_name(self, name): for edge_id, edge in six.iteritems(self._edges): if edge['name'] == name: return False return True def get_edge_jobs(self, edge_id): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) header = { 'status': 200 } response = {"edgeJob": []} return (header, response) def deploy_edge(self, request): if (self._unique_router_name and not self._validate_edge_name(request['name'])): header = { 'status': 400 } msg = ('Edge name should be unique for tenant. Edge %s ' 'already exists for default tenant.') % request['name'] response = { 'details': msg, 'errorCode': 10085, 'rootCauseString': None, 'moduleName': 'vShield Edge', 'errorData': None } return (header, jsonutils.dumps(response)) self._edge_idx = self._edge_idx + 1 edge_id = "edge-%d" % self._edge_idx self._edges[edge_id] = { 'name': request['name'], 'request': request, 'nat_rules': None, 'nat_rule_id': 0, 'interface_index': 1 } header = { 'status': 200, 'location': 'https://host/api/4.0/edges/%s' % edge_id } response = '' return (header, response) def update_edge(self, edge_id, request): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) edge = self._edges[edge_id] edge['name'] = request['name'] header = { 'status': 200 } response = '' return (header, response) def get_edge_id(self, job_id): if job_id not in self._jobs: raise Exception(_("Job %s does not nexist") % job_id) header = { 'status': 200 } response = { 'edgeId': self._jobs[job_id] } return (header, response) def get_edge_deploy_status(self, edge_id): if edge_id 
not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) header = { 'status': 200, } response = { 'systemStatus': 'good' } return (header, response) def delete_edge(self, edge_id): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) del self._edges[edge_id] header = { 'status': 200 } response = '' return (header, response) def add_vdr_internal_interface(self, edge_id, interface): interface = interface['interfaces'][0] if not self._edges[edge_id].get('interfaces'): self._edges[edge_id]['interfaces'] = [] index = len(self._edges[edge_id]['interfaces']) interface['index'] = str(index) self._edges[edge_id]['interfaces'].append(interface) header = { 'status': 200 } response = {"interfaces": [{"index": str(index)}]} return (header, response) def get_edge_interfaces(self, edge_id): if not self._edges[edge_id].get('interfaces'): self._edges[edge_id]['interfaces'] = [] header = { 'status': 200 } response = {"interfaces": self._edges[edge_id].get('interfaces', [])} return (header, response) def update_vdr_internal_interface( self, edge_id, interface_index, interface): header = { 'status': 200 } response = '' return (header, response) def get_vdr_internal_interface(self, edge_id, interface_index): response = {} header = { 'status': 200 } for interface in self._edges[edge_id].get('interfaces', []): if int(interface['index']) == int(interface_index): response = interface return (header, response) def delete_vdr_internal_interface(self, edge_id, interface_index): for interface in self._edges[edge_id].get('interfaces', []): if int(interface['index']) == int(interface_index): header = { 'status': 200 } break header = {'status': 404} response = '' return (header, response) def get_interfaces(self, edge_id): header = { 'status': 200 } response = '' return (header, response) def update_interface(self, edge_id, vnic): header = { 'status': 200 } response = '' return (header, response) def delete_interface(self, edge_id, vnic_index): 
header = { 'status': 200 } response = '' return (header, response) def query_interface(self, edge_id, vnic_index): header = { 'status': 200 } response = { 'label': 'vNic_1', 'name': 'internal1', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk', 'subInterfaces': {'subInterfaces': []}, 'isConnected': True } return (header, response) def reconfigure_dhcp_service(self, edge_id, request): header = { 'status': 201 } response = '' return (header, response) def query_dhcp_configuration(self, edge_id): header = { 'status': 200 } response = { "featureType": "dhcp_4.0", "version": 14, "enabled": True, "staticBindings": {"staticBindings": [{ "macAddress": "fa:16:3e:e6:ad:ce", "bindingId": "binding-1"}]}, "ipPools": {"ipPools": []} } return (header, response) def create_dhcp_binding(self, edge_id, request): if not self._dhcp_bindings.get(edge_id): self._dhcp_bindings[edge_id] = {} self._dhcp_bindings[edge_id]['idx'] = 0 binding_idx = self._dhcp_bindings[edge_id]['idx'] binding_idx_str = "binding-" + str(binding_idx) self._dhcp_bindings[edge_id][binding_idx_str] = request self._dhcp_bindings[edge_id]['idx'] = binding_idx + 1 header = { 'status': 200, 'location': '/dhcp/config/bindings/%s' % binding_idx_str } response = '' return (header, response) def delete_dhcp_binding(self, edge_id, binding_id): if binding_id not in self._dhcp_bindings[edge_id]: raise Exception(_("binding %s does not exist") % binding_id) del self._dhcp_bindings[edge_id][binding_id] header = { 'status': 200 } response = '' return (header, response) def get_dhcp_binding(self, edge_id, binding_id): if binding_id not in self._dhcp_bindings[edge_id]: raise Exception(_("binding %s does not exist") % binding_id) response = self._dhcp_bindings[edge_id][binding_id] header = { 'status': 200 } return (header, response) def create_bridge(self, edge_id, request): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) header = { 'status': 204 } response = '' return 
(header, response) def delete_bridge(self, edge_id): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) header = { 'status': 204 } response = '' return (header, response) def get_nat_config(self, edge_id): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) edge = self._edges[edge_id] rules = edge['nat_rules'] if rules is None: rules = { 'rules': { 'natRulesDtos': [] }, 'version': 1 } header = { 'status': 200 } rules['version'] = 1 return (header, rules) def update_nat_config(self, edge_id, nat): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) edge = self._edges[edge_id] max_rule_id = edge['nat_rule_id'] rules = copy.deepcopy(nat) for rule in rules['rules']['natRulesDtos']: rule_id = rule.get('ruleId', 0) if rule_id > max_rule_id: max_rule_id = rule_id for rule in rules['rules']['natRulesDtos']: if 'ruleId' not in rule: max_rule_id = max_rule_id + 1 rule['ruleId'] = max_rule_id edge['nat_rules'] = rules edge['nat_rule_id'] = max_rule_id header = { 'status': 200 } response = '' return (header, response) def delete_nat_rule(self, edge_id, rule_id): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) edge = self._edges[edge_id] rules = edge['nat_rules'] rule_to_delete = None for rule in rules['rules']['natRulesDtos']: if rule_id == rule['ruleId']: rule_to_delete = rule break if rule_to_delete is None: raise Exception(_("Rule id %d doest not exist") % rule_id) rules['rules']['natRulesDtos'].remove(rule_to_delete) header = { 'status': 200 } response = '' return (header, response) def get_edge_status(self, edge_id): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) header = { 'status': 200 } response = { 'edgeStatus': 'GREEN' } return (header, response) def get_edge(self, edge_id): if edge_id not in self._edges: raise exceptions.VcnsGeneralException( _("Edge %s does not exist!") % edge_id) header 
= { 'status': 200 } response = { 'name': 'fake-edge', 'id': edge_id, 'appliances': {'appliances': []} } return (header, response) def get_edges(self): edges = [] for edge_id in self._edges: edges.append({ 'id': edge_id, 'edgeStatus': 'GREEN', 'name': self._edges[edge_id]['name'] }) return edges def get_vdn_switch(self, dvs_id): header = { 'status': 200 } response = { 'name': 'fake-switch', 'id': dvs_id, 'teamingPolicy': 'ETHER_CHANNEL' } return (header, response) def update_vdn_switch(self, switch): header = { 'status': 200 } response = '' return (header, response) def update_routes(self, edge_id, routes): header = { 'status': 200 } response = '' return (header, response) def create_lswitch(self, lsconfig): # The lswitch is created via VCNS API so the fake nsx_api will not # see it. Added to fake nsx_api here. if self._fake_nsx_api: lswitch = self._fake_nsx_api._add_lswitch( jsonutils.dumps(lsconfig)) else: lswitch = lsconfig lswitch['uuid'] = uuidutils.generate_uuid() self._lswitches[lswitch['uuid']] = lswitch header = { 'status': 200 } lswitch['_href'] = '/api/ws.v1/lswitch/%s' % lswitch['uuid'] return (header, lswitch) def delete_lswitch(self, id): if id not in self._lswitches: raise Exception(_("Lswitch %s does not exist") % id) del self._lswitches[id] if self._fake_nsx_api: # TODO(fank): fix the hack del self._fake_nsx_api._fake_lswitch_dict[id] header = { 'status': 200 } response = '' return (header, response) def sync_firewall(self): header = {'status': 204} response = "" return self.return_helper(header, response) def update_firewall(self, edge_id, fw_req): self.fake_firewall_dict[edge_id] = fw_req rules = self.fake_firewall_dict[edge_id][ 'firewallRules']['firewallRules'] index = 10 for rule in rules: rule['ruleId'] = index index += 10 header = {'status': 204} response = "" return self.return_helper(header, response) def delete_firewall(self, edge_id): header = {'status': 404} if edge_id in self.fake_firewall_dict: header = {'status': 204} del 
self.fake_firewall_dict[edge_id] response = "" return self.return_helper(header, response) def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req): if edge_id not in self.fake_firewall_dict: raise Exception(_("Edge %s does not exist") % edge_id) header = {'status': 404} rules = self.fake_firewall_dict[edge_id][ 'firewallRules']['firewallRules'] for rule in rules: if rule['ruleId'] == int(vcns_rule_id): header['status'] = 204 rule.update(fwr_req) break response = "" return self.return_helper(header, response) def delete_firewall_rule(self, edge_id, vcns_rule_id): if edge_id not in self.fake_firewall_dict: raise Exception(_("Edge %s does not exist") % edge_id) header = {'status': 404} rules = self.fake_firewall_dict[edge_id][ 'firewallRules']['firewallRules'] for index in range(len(rules)): if rules[index]['ruleId'] == int(vcns_rule_id): header['status'] = 204 del rules[index] break response = "" return self.return_helper(header, response) def add_firewall_rule_above(self, edge_id, ref_vcns_rule_id, fwr_req): if edge_id not in self.fake_firewall_dict: raise Exception(_("Edge %s does not exist") % edge_id) header = {'status': 404} rules = self.fake_firewall_dict[edge_id][ 'firewallRules']['firewallRules'] pre = 0 for index in range(len(rules)): if rules[index]['ruleId'] == int(ref_vcns_rule_id): rules.insert(index, fwr_req) rules[index]['ruleId'] = (int(ref_vcns_rule_id) + pre) / 2 header = { 'status': 204, 'location': "https://host/api/4.0/edges/edge_id/firewall" "/config/rules/%s" % rules[index]['ruleId']} break pre = int(rules[index]['ruleId']) response = "" return self.return_helper(header, response) def add_firewall_rule(self, edge_id, fwr_req): if edge_id not in self.fake_firewall_dict: self.fake_firewall_dict[edge_id] = self.temp_firewall rules = self.fake_firewall_dict[edge_id][ 'firewallRules']['firewallRules'] rules.append(fwr_req) index = len(rules) rules[index - 1]['ruleId'] = index * 10 header = { 'status': 204, 'location': 
"https://host/api/4.0/edges/edge_id/firewall" "/config/rules/%s" % rules[index - 1]['ruleId']} response = "" return self.return_helper(header, response) def get_firewall(self, edge_id): if edge_id not in self.fake_firewall_dict: self.fake_firewall_dict[edge_id] = self.temp_firewall header = {'status': 204} response = self.fake_firewall_dict[edge_id] return self.return_helper(header, response) def get_firewall_rule(self, edge_id, vcns_rule_id): if edge_id not in self.fake_firewall_dict: raise Exception(_("Edge %s does not exist") % edge_id) header = {'status': 404} response = "" rules = self.fake_firewall_dict[edge_id][ 'firewallRules']['firewallRules'] for rule in rules: if rule['ruleId'] == int(vcns_rule_id): header['status'] = 204 response = rule break return self.return_helper(header, response) def is_name_unique(self, objs_dict, name): return name not in [obj_dict['name'] for obj_dict in objs_dict.values()] def create_vip(self, edge_id, vip_new): header = {'status': 403} response = "" if not self._fake_virtualservers_dict.get(edge_id): self._fake_virtualservers_dict[edge_id] = {} if not self.is_name_unique(self._fake_virtualservers_dict[edge_id], vip_new['name']): return self.return_helper(header, response) vip_vseid = uuidutils.generate_uuid() self._fake_virtualservers_dict[edge_id][vip_vseid] = vip_new header = { 'status': 204, 'location': "https://host/api/4.0/edges/edge_id" "/loadbalancer/config/%s" % vip_vseid} return self.return_helper(header, response) def get_vip(self, edge_id, vip_vseid): header = {'status': 404} response = "" if not self._fake_virtualservers_dict.get(edge_id) or ( not self._fake_virtualservers_dict[edge_id].get(vip_vseid)): return self.return_helper(header, response) header = {'status': 204} response = self._fake_virtualservers_dict[edge_id][vip_vseid] return self.return_helper(header, response) def update_vip(self, edge_id, vip_vseid, vip_new): header = {'status': 404} response = "" if not self._fake_virtualservers_dict.get(edge_id) 
or ( not self._fake_virtualservers_dict[edge_id].get(vip_vseid)): return self.return_helper(header, response) header = {'status': 204} self._fake_virtualservers_dict[edge_id][vip_vseid].update( vip_new) return self.return_helper(header, response) def delete_vip(self, edge_id, vip_vseid): header = {'status': 404} response = "" if not self._fake_virtualservers_dict.get(edge_id) or ( not self._fake_virtualservers_dict[edge_id].get(vip_vseid)): return self.return_helper(header, response) header = {'status': 204} del self._fake_virtualservers_dict[edge_id][vip_vseid] return self.return_helper(header, response) def create_pool(self, edge_id, pool_new): header = {'status': 403} response = "" if not self._fake_pools_dict.get(edge_id): self._fake_pools_dict[edge_id] = {} if not self.is_name_unique(self._fake_pools_dict[edge_id], pool_new['name']): return self.return_helper(header, response) pool_vseid = uuidutils.generate_uuid() self._fake_pools_dict[edge_id][pool_vseid] = pool_new header = { 'status': 204, 'location': "https://host/api/4.0/edges/edge_id" "/loadbalancer/config/%s" % pool_vseid} return self.return_helper(header, response) def get_pool(self, edge_id, pool_vseid): header = {'status': 404} response = "" if not self._fake_pools_dict.get(edge_id) or ( not self._fake_pools_dict[edge_id].get(pool_vseid)): return self.return_helper(header, response) header = {'status': 204} response = self._fake_pools_dict[edge_id][pool_vseid] return self.return_helper(header, response) def update_pool(self, edge_id, pool_vseid, pool_new): header = {'status': 404} response = "" if not self._fake_pools_dict.get(edge_id) or ( not self._fake_pools_dict[edge_id].get(pool_vseid)): return self.return_helper(header, response) header = {'status': 204} self._fake_pools_dict[edge_id][pool_vseid].update( pool_new) return self.return_helper(header, response) def delete_pool(self, edge_id, pool_vseid): header = {'status': 404} response = "" if not self._fake_pools_dict.get(edge_id) or ( not 
self._fake_pools_dict[edge_id].get(pool_vseid)): return self.return_helper(header, response) header = {'status': 204} del self._fake_pools_dict[edge_id][pool_vseid] return self.return_helper(header, response) def create_health_monitor(self, edge_id, monitor_new): if not self._fake_monitors_dict.get(edge_id): self._fake_monitors_dict[edge_id] = {} monitor_vseid = uuidutils.generate_uuid() self._fake_monitors_dict[edge_id][monitor_vseid] = monitor_new header = { 'status': 204, 'location': "https://host/api/4.0/edges/edge_id" "/loadbalancer/config/%s" % monitor_vseid} response = "" return self.return_helper(header, response) def get_health_monitor(self, edge_id, monitor_vseid): header = {'status': 404} response = "" if not self._fake_monitors_dict.get(edge_id) or ( not self._fake_monitors_dict[edge_id].get(monitor_vseid)): return self.return_helper(header, response) header = {'status': 204} response = self._fake_monitors_dict[edge_id][monitor_vseid] return self.return_helper(header, response) def update_health_monitor(self, edge_id, monitor_vseid, monitor_new): header = {'status': 404} response = "" if not self._fake_monitors_dict.get(edge_id) or ( not self._fake_monitors_dict[edge_id].get(monitor_vseid)): return self.return_helper(header, response) header = {'status': 204} self._fake_monitors_dict[edge_id][monitor_vseid].update( monitor_new) return self.return_helper(header, response) def delete_health_monitor(self, edge_id, monitor_vseid): header = {'status': 404} response = "" if not self._fake_monitors_dict.get(edge_id) or ( not self._fake_monitors_dict[edge_id].get(monitor_vseid)): return self.return_helper(header, response) header = {'status': 204} del self._fake_monitors_dict[edge_id][monitor_vseid] return self.return_helper(header, response) def create_app_profile(self, edge_id, app_profile): if not self._fake_app_profiles_dict.get(edge_id): self._fake_app_profiles_dict[edge_id] = {} app_profileid = uuidutils.generate_uuid() 
self._fake_app_profiles_dict[edge_id][app_profileid] = app_profile header = { 'status': 204, 'location': "https://host/api/4.0/edges/edge_id" "/loadbalancer/config/%s" % app_profileid} response = "" return self.return_helper(header, response) def update_app_profile(self, edge_id, app_profileid, app_profile): header = {'status': 404} response = "" if not self._fake_app_profiles_dict.get(edge_id) or ( not self._fake_app_profiles_dict[edge_id].get(app_profileid)): return self.return_helper(header, response) header = {'status': 204} self._fake_app_profiles_dict[edge_id][app_profileid].update( app_profile) return self.return_helper(header, response) def delete_app_profile(self, edge_id, app_profileid): header = {'status': 404} response = "" if not self._fake_app_profiles_dict.get(edge_id) or ( not self._fake_app_profiles_dict[edge_id].get(app_profileid)): return self.return_helper(header, response) header = {'status': 204} del self._fake_app_profiles_dict[edge_id][app_profileid] return self.return_helper(header, response) def create_app_rule(self, edge_id, app_rule): app_ruleid = uuidutils.generate_uuid() header = { 'status': 204, 'location': "https://host/api/4.0/edges/edge_id" "/loadbalancer/config/%s" % app_ruleid} response = "" return self.return_helper(header, response) def update_app_rule(self, edge_id, app_ruleid, app_rule): pass def delete_app_rule(self, edge_id, app_ruleid): pass def get_loadbalancer_config(self, edge_id): header = {'status': 204} response = {'config': False} if self._fake_loadbalancer_config[edge_id]: response['config'] = self._fake_loadbalancer_config[edge_id] return self.return_helper(header, response) def update_ipsec_config(self, edge_id, ipsec_config): self.fake_ipsecvpn_dict[edge_id] = ipsec_config header = {'status': 204} response = "" return self.return_helper(header, response) def delete_ipsec_config(self, edge_id): header = {'status': 404} if edge_id in self.fake_ipsecvpn_dict: header = {'status': 204} del 
self.fake_ipsecvpn_dict[edge_id] response = "" return self.return_helper(header, response) def get_ipsec_config(self, edge_id): if edge_id not in self.fake_ipsecvpn_dict: self.fake_ipsecvpn_dict[edge_id] = self.temp_ipsecvpn header = {'status': 204} response = self.fake_ipsecvpn_dict[edge_id] return self.return_helper(header, response) def enable_service_loadbalancer(self, edge_id, config): header = {'status': 204} response = "" self._fake_loadbalancer_config[edge_id] = True return self.return_helper(header, response) def create_virtual_wire(self, vdn_scope_id, request): self._virtual_wire_id += 1 header = {'status': 200} virtual_wire = 'virtualwire-%s' % self._virtual_wire_id data = {'name': request['virtualWireCreateSpec']['name'], 'objectId': virtual_wire} self._fake_virtual_wires.update({virtual_wire: data}) return (header, virtual_wire) def delete_virtual_wire(self, virtualwire_id): del self._fake_virtual_wires[virtualwire_id] header = { 'status': 200 } response = '' return (header, response) def create_port_group(self, dvs_id, request): self._portgroup_id += 1 header = {'status': 200} portgroup = 'dvportgroup-%s' % self._portgroup_id data = {'name': request['networkSpec']['networkName'], 'objectId': portgroup} self._fake_portgroups.update({portgroup: data}) return (header, portgroup) def delete_port_group(self, dvs_id, portgroup_id): del self._fake_portgroups[portgroup_id] header = { 'status': 200 } response = '' return (header, response) def return_helper(self, header, response): status = int(header['status']) if 200 <= status <= 300: return (header, response) if status in self.errors: cls = self.errors[status] else: cls = exceptions.VcnsApiException raise cls( status=status, header=header, uri='fake_url', response=response) def _get_bad_req_response(self, details, error_code, module_name): bad_req_response_format = """
%(details)s
%(error_code)s %(module_name)s
""" return bad_req_response_format % { 'details': details, 'error_code': error_code, 'module_name': module_name, } def _get_section_location(self, type, section_id): return SECTION_LOCATION_HEADER % (type, section_id) def _get_section_id_from_uri(self, section_uri): return section_uri.split('/')[-1] def _section_not_found(self, section_id): msg = "Invalid section id found : %s" % section_id response = self._get_bad_req_response(msg, 100089, 'vShield App') headers = {'status': 400} return (headers, response) def _unknown_error(self): msg = "Unknown Error Occurred.Please look into tech support logs." response = self._get_bad_req_response(msg, 100046, 'vShield App') headers = {'status': 400} return (headers, response) def create_security_group(self, request): sg = request['securitygroup'] if sg['name'] in self._securitygroups['names']: status = 400 msg = ("Another object with same name : %s already exists in " "the current scope : globalroot-0." % sg['name']) response = self._get_bad_req_response(msg, 210, 'core-services') else: sg_id = str(self._securitygroups['ids']) self._securitygroups['ids'] += 1 sg['members'] = set() self._securitygroups[sg_id] = sg self._securitygroups['names'].add(sg['name']) status, response = 201, sg_id return ({'status': status}, response) def update_security_group(self, sg_id, sg_name, description): sg = self._securitygroups[sg_id] self._securitygroups['names'].remove(sg['name']) sg['name'] = sg_name sg['description'] = description self._securitygroups['names'].add(sg_name) return {'status': 200}, '' def delete_security_group(self, securitygroup_id): try: del self._securitygroups[securitygroup_id] except KeyError: status = 404 msg = ("The requested object : %s could " "not be found. Object identifiers are case sensitive." 
% securitygroup_id) response = self._get_bad_req_response(msg, 210, 'core-services') else: status, response = 200, '' return ({'status': status}, response) def get_security_group_id(self, sg_name): for k, v in self._securitygroups.items(): if k not in ('ids', 'names') and v['name'] == sg_name: return k def get_security_group(self, sg_id): sg = self._securitygroups.get(sg_id) if sg: return ('%s"%s"' '' % (sg_id, sg.get("name"))) def list_security_groups(self): response = "" header = {'status': 200} for k in self._securitygroups.keys(): if k not in ('ids', 'names'): response += self.get_security_group(k) response = "%s" % response return header, response def create_redirect_section(self, request): return self.create_section('layer3redirect', request) def create_section(self, type, request, insert_top=False, insert_before=None): section = ET.fromstring(request) section_name = section.attrib.get('name') if section_name in self._sections['names']: msg = "Section with name %s already exists." % section_name response = self._get_bad_req_response(msg, 100092, 'vShield App') headers = {'status': 400} else: section_id = str(self._sections['section_ids']) section.attrib['id'] = 'section-%s' % section_id _section = self._sections[section_id] = {'name': section_name, 'etag': 'Etag-0', 'rules': {}} self._sections['names'].add(section_name) for rule in section.findall('rule'): rule_id = str(self._sections['rule_ids']) rule.attrib['id'] = rule_id _section['rules'][rule_id] = ET.tostring(rule) self._sections['rule_ids'] += 1 response = ET.tostring(section) headers = { 'status': 201, 'location': self._get_section_location(type, section_id), 'etag': _section['etag'] } self._sections['section_ids'] += 1 return (headers, response) def update_section(self, section_uri, request, h): section = ET.fromstring(request) section_id = section.attrib.get('id') section_name = section.attrib.get('name') if section_id not in self._sections: return self._section_not_found(section_id) _section = 
self._sections[section_id] if (_section['name'] != section_name and section_name in self._sections['names']): # Theres a section with this name already headers, response = self._unknown_error() else: # Different Etag every successful update _section['etag'] = ('Etag-1' if _section['etag'] == 'Etag-0' else 'Etag-0') self._sections['names'].remove(_section['name']) _section['name'] = section_name self._sections['names'].add(section_name) for rule in section.findall('rule'): if not rule.attrib.get('id'): rule.attrib['id'] = str(self._sections['rule_ids']) self._sections['rule_ids'] += 1 rule_id = rule.attrib.get('id') _section['rules'][rule_id] = ET.tostring(rule) _, response = self._get_section(section_id) headers = { 'status': 200, 'location': self._get_section_location(type, section_id), 'etag': _section['etag'] } return (headers, response) def delete_section(self, section_uri): section_id = self._get_section_id_from_uri(section_uri) if section_id not in self._sections: headers, response = self._unknown_error() else: section_name = self._sections[section_id]['name'] del self._sections[section_id] self._sections['names'].remove(section_name) response = '' headers = {'status': 204} return (headers, response) def get_section(self, section_uri): section_id = self._get_section_id_from_uri(section_uri) if section_id not in self._sections: headers, response = self._section_not_found(section_id) else: return self._get_section(section_id) def _get_section(self, section_id): section_rules = ( b''.join(self._sections[section_id]['rules'].values())) response = ('
%s
' % (section_id, self._sections[section_id]['name'], section_rules)) headers = {'status': 200, 'etag': self._sections[section_id]['etag']} return (headers, response) def get_section_id(self, section_name): for k, v in self._sections.items(): if (k not in ('section_ids', 'rule_ids', 'names') and v['name'] == section_name): return k def update_section_by_id(self, id, type, request): pass def get_default_l3_id(self): return 1234 def get_dfw_config(self): response = "" for sec_id in self._sections.keys(): if sec_id.isdigit(): h, r = self._get_section(str(sec_id)) response += r response = "%s" % response headers = {'status': 200} return (headers, response) def remove_rule_from_section(self, section_uri, rule_id): section_id = self._get_section_id_from_uri(section_uri) if section_id not in self._sections: headers, response = self._section_not_found(section_id) else: section = self._sections[section_id] if rule_id in section['rules']: del section['rules'][rule_id] response = '' headers = {'status': 204} else: headers, response = self._unknown_error() return (headers, response) def add_member_to_security_group(self, security_group_id, member_id): if security_group_id not in self._securitygroups: msg = ("The requested object : %s could not be found." "Object identifiers are case " "sensitive.") % security_group_id response = self._get_bad_req_response(msg, 202, 'core-services') headers = {'status': 404} else: self._securitygroups[security_group_id]['members'].add(member_id) response = '' headers = {'status': 200} return (headers, response) def remove_member_from_security_group(self, security_group_id, member_id): if security_group_id not in self._securitygroups: msg = ("The requested object : %s could not be found." 
"Object identifiers are " "case sensitive.") % security_group_id response = self._get_bad_req_response(msg, 202, 'core-services') headers = {'status': 404} else: self._securitygroups[security_group_id]['members'].remove( member_id) response = '' headers = {'status': 200} return (headers, response) def create_spoofguard_policy(self, enforcement_points, name, enable): policy = {'name': name, 'enforcementPoints': [{'id': enforcement_points[0]}], 'operationMode': 'MANUAL' if enable else 'DISABLE'} policy_id = len(self._spoofguard_policies) self._spoofguard_policies.append(policy) return None, 'spoofguardpolicy-%s' % policy_id def _get_index(self, policy_id): return int(policy_id.split('-')[-1]) def update_spoofguard_policy(self, policy_id, enforcement_points, name, enable): policy = {'name': name, 'enforcementPoints': [{'id': enforcement_points[0]}], 'operationMode': 'MANUAL' if enable else 'DISABLE'} self._spoofguard_policies[self._get_index(policy_id)] = policy return None, '' def delete_spoofguard_policy(self, policy_id): self._spoofguard_policies[self._get_index(policy_id)] = {} def get_spoofguard_policy(self, policy_id): try: return None, self._spoofguard_policies[self._get_index(policy_id)] except IndexError: raise exceptions.VcnsGeneralException( _("Spoofguard policy not found")) def get_spoofguard_policies(self): return None, {'policies': self._spoofguard_policies} def approve_assigned_addresses(self, policy_id, vnic_id, mac_addr, addresses): pass def publish_assigned_addresses(self, policy_id, vnic_id): pass def configure_reservations(self): pass def inactivate_vnic_assigned_addresses(self, policy_id, vnic_id): pass def add_vm_to_exclude_list(self, vm_id): pass def delete_vm_from_exclude_list(self, vm_id): pass def get_scoping_objects(self): response = ('' 'Network' 'aaa' 'bbb' '') return response def reset_all(self): self._jobs.clear() self._edges.clear() self._lswitches.clear() self.fake_firewall_dict = {} self._fake_virtualservers_dict = {} 
self._fake_pools_dict = {} self._fake_monitors_dict = {} self._fake_app_profiles_dict = {} self._fake_loadbalancer_config = {} self._fake_virtual_wires = {} self._virtual_wire_id = 0 self._fake_portgroups = {} self._portgroup_id = 0 self._securitygroups = {'ids': 0, 'names': set()} self._sections = {'section_ids': 0, 'rule_ids': 0, 'names': set()} self._dhcp_bindings = {} self._ipam_pools = {} def validate_datacenter_moid(self, object_id, during_init=False): return True def validate_network(self, object_id, during_init=False): return True def validate_network_name(self, object_id, name, during_init=False): return True def validate_vdn_scope(self, object_id): return True def get_dvs_list(self): return [] def validate_dvs(self, object_id, dvs_list=None): return True def edges_lock_operation(self): pass def validate_inventory(self, moref): return True def get_version(self): return '6.2.3' def get_tuning_configration(self): return { 'lockUpdatesOnEdge': True, 'edgeVMHealthCheckIntervalInMin': 0, 'aggregatePublishing': False, 'publishingTimeoutInMs': 1200000, 'healthCheckCommandTimeoutInMs': 120000, 'maxParallelVixCallsForHealthCheck': 25} def configure_aggregate_publishing(self): pass def enable_ha(self, edge_id, request_config): header = { 'status': 201 } response = '' return (header, response) def get_edge_syslog(self, edge_id): if ('syslog' not in self._edges.get(edge_id)): header = { 'status': 400 } response = {} else: header = { 'status': 200 } response = self._edges.get(edge_id)['syslog'] return (header, response) def update_edge_syslog(self, edge_id, config): if edge_id not in self._edges: raise exceptions.VcnsGeneralException( _("edge not found")) self._edges[edge_id]['syslog'] = config header = { 'status': 204 } response = '' return (header, response) def delete_edge_syslog(self, edge_id): header = { 'status': 204 } response = '' return (header, response) def update_edge_config_with_modifier(self, edge_id, module, modifier): header = { 'status': 204 } response 
= '' return (header, response) def change_edge_appliance_size(self, edge_id, size): header = { 'status': 204 } response = {} return (header, response) def change_edge_appliance(self, edge_id, request): header = { 'status': 204 } response = {} return (header, response) def get_edge_appliances(self, edge_id): header = { 'status': 204 } response = {} return (header, response) def get_routes(self, edge_id): header = { 'status': 204 } response = {'staticRoutes': {'staticRoutes': []}} return (header, response) def get_service_insertion_profile(self, profile_id): headers = {'status': 200} response = """ %s ServiceProfile ServiceProfile Service_Vendor securitygroup-30 """ response_format = response % profile_id return (headers, response_format) def update_service_insertion_profile_binding(self, profile_id, request): response = '' headers = {'status': 200} return (headers, response) def create_ipam_ip_pool(self, request): pool_id = uuidutils.generate_uuid() # format the request before saving it: fixed_request = request['ipamAddressPool'] ranges = fixed_request['ipRanges'] for i in range(len(ranges)): ranges[i] = ranges[i]['ipRangeDto'] self._ipam_pools[pool_id] = {'request': fixed_request, 'allocated': []} header = {'status': 200} response = pool_id return (header, response) def delete_ipam_ip_pool(self, pool_id): response = '' if pool_id in self._ipam_pools: pool = self._ipam_pools.pop(pool_id) if len(pool['allocated']) > 0: header = {'status': 400} msg = ("Unable to delete IP pool %s. IP addresses from this " "pool are being used." % pool_id) response = self._get_bad_req_response( msg, 120053, 'core-services') else: header = {'status': 200} return (header, response) else: header = {'status': 400} msg = ("Unable to delete IP pool %s. Pool does not exist." 
% pool_id) response = self._get_bad_req_response( msg, 120054, 'core-services') return self.return_helper(header, response) def get_ipam_ip_pool(self, pool_id): if pool_id in self._ipam_pools: header = {'status': 200} response = self._ipam_pools[pool_id]['request'] else: header = {'status': 400} msg = ("Unable to retrieve IP pool %s. Pool does not exist." % pool_id) response = self._get_bad_req_response( msg, 120054, 'core-services') return self.return_helper(header, response) def _allocate_ipam_add_ip_and_return(self, pool, ip_addr): # build the response response_text = ( "" "%(id)s" "%(ip)s" "%(gateway)s" "%(prefix)s" "subnet-44") response_args = {'id': len(pool['allocated']), 'gateway': pool['request']['gateway'], 'prefix': pool['request']['prefixLength']} response_args['ip'] = ip_addr response = response_text % response_args # add the ip to the list of allocated ips pool['allocated'].append(ip_addr) header = {'status': 200} return (header, response) def allocate_ipam_ip_from_pool(self, pool_id, ip_addr=None): if pool_id in self._ipam_pools: pool = self._ipam_pools[pool_id] if ip_addr: # verify that this ip was not yet allocated if ip_addr in pool['allocated']: header = {'status': 400} msg = ("Unable to allocate IP from pool %(pool)s. " "IP %(ip)s already in use." % {'pool': pool_id, 'ip': ip_addr}) response = self._get_bad_req_response( msg, constants.NSX_ERROR_IPAM_ALLOCATE_IP_USED, 'core-services') else: return self._allocate_ipam_add_ip_and_return( pool, ip_addr) else: # get an unused ip from the pool for ip_range in pool['request']['ipRanges']: r = netaddr.IPRange(ip_range['startAddress'], ip_range['endAddress']) for ip_addr in r: if str(ip_addr) not in pool['allocated']: return self._allocate_ipam_add_ip_and_return( pool, str(ip_addr)) # if we got here - no ip was found header = {'status': 400} msg = ("Unable to allocate IP from pool %(pool)s. " "All IPs have been used." 
% {'pool': pool_id}) response = self._get_bad_req_response( msg, constants.NSX_ERROR_IPAM_ALLOCATE_ALL_USED, 'core-services') else: header = {'status': 400} msg = ("Unable to allocate IP from pool %s. Pool does not " "exist." % pool_id) response = self._get_bad_req_response( msg, 120054, 'core-services') return self.return_helper(header, response) def release_ipam_ip_to_pool(self, pool_id, ip_addr): if pool_id in self._ipam_pools: pool = self._ipam_pools[pool_id] if ip_addr not in pool['allocated']: header = {'status': 400} msg = ("IP %(ip)s was not allocated from pool %(pool)s." % {'ip': ip_addr, 'pool': pool_id}) response = self._get_bad_req_response( msg, 120056, 'core-services') else: pool['allocated'].remove(ip_addr) response = '' header = {'status': 200} else: header = {'status': 400} msg = ("Unable to release IP to pool %s. Pool does not exist." % pool_id) response = self._get_bad_req_response( msg, 120054, 'core-services') return self.return_helper(header, response) def get_security_policy(self, policy_id, return_xml=True): name = 'pol1' description = 'dummy' if return_xml: response_text = ( "" "%(id)s" "%(name)s" "%(desc)s" "") % {'id': policy_id, 'name': name, 'desc': description} return response_text else: return {'objectId': policy_id, 'name': name, 'description': description} def update_security_policy(self, policy_id, request): pass def get_security_policies(self): policies = [] for id in ['policy-1', 'policy-2', 'policy-3']: policies.append(self.get_security_policy(id, return_xml=False)) return {'policies': policies} def list_applications(self): applications = [{'name': 'ICMP Echo', 'objectID': 'application-333'}, {'name': 'IPv6-ICMP Echo', 'objectID': 'application-1001'}] return applications def update_dynamic_routing_service(self, edge_id, request_config): header = {'status': 201} response = { 'routerId': '172.24.4.12', 'ipPrefixes': { 'ipPrefixes': [ {'ipAddress': '10.0.0.0/24', 'name': 'prefix-name'} ] } } return self.return_helper(header, 
response) def get_edge_routing_config(self, edge_id): header = {'status': 200} response = { 'featureType': '', 'ospf': {}, 'routingGlobalConfig': { 'routerId': '172.24.4.12', 'ipPrefixes': { 'ipPrefixes': [ {'ipAddress': '10.0.0.0/24', 'name': 'prefix-name'} ] }, 'logging': { 'logLevel': 'info', 'enable': False }, 'ecmp': False } } return self.return_helper(header, response) def update_edge_routing_config(self, edge_id, request): header = {'status': 200} return self.return_helper(header, {}) def update_bgp_dynamic_routing(self, edge_id, bgp_request): header = {"status": 201} response = { "localAS": 65000, "enabled": True, "bgpNeighbours": { "bgpNeighbours": [ { "bgpFilters": { "bgpFilters": [ { "action": "deny", "direction": "in" } ] }, "password": None, "ipAddress": "172.24.4.253", "remoteAS": 65000 } ] }, "redistribution": { "rules": { "rules": [ { "action": "deny", "from": { "bgp": False, "connected": False, "static": False, "ospf": False }, "id": 0 }, { "action": "permit", "from": { "bgp": False, "connected": True, "static": True, "ospf": False }, "id": 1, "prefixName": "eee4eb79-359e-4416" } ] }, "enabled": True } } return self.return_helper(header, response) def get_bgp_routing_config(self, edge_id): header = {'status': 200} response = { "localAS": 65000, "enabled": True, "redistribution": { "rules": { "rules": [ { "action": "deny", "from": { "bgp": False, "connected": False, "static": False, "ospf": False }, "id": 0 }, { "action": "permit", "from": { "bgp": False, "connected": True, "static": True, "ospf": False }, "id": 1, "prefixName": "eee4eb79-359e-4416" } ] }, "enabled": True } } return self.return_helper(header, response) def delete_bgp_routing_config(self, edge_id): header = {'status': 200} response = '' return header, response def get_application_id(self, name): return 'application-123' vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/vshield/test_vcns_driver.py0000666000175100017510000005004613244523345027004 0ustar zuulzuul00000000000000# Copyright 
2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet import greenthread import mock from neutron.tests import base from neutron_lib import context as neutron_context from oslo_config import cfg import six from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield import edge_appliance_driver as e_drv from vmware_nsx.plugins.nsx_v.vshield.tasks import ( constants as ts_const) from vmware_nsx.plugins.nsx_v.vshield.tasks import tasks as ts from vmware_nsx.plugins.nsx_v.vshield import vcns_driver from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns VCNS_CONFIG_FILE = vmware.get_fake_conf("vcns.ini.test") ts.TaskManager.set_default_interval(100) class VcnsDriverTaskManagerTestCase(base.BaseTestCase): def setUp(self): super(VcnsDriverTaskManagerTestCase, self).setUp() self.manager = ts.TaskManager() self.manager.start(100) def tearDown(self): self.manager.stop() # Task manager should not leave running threads around # if _thread is None it means it was killed in stop() self.assertIsNone(self.manager._thread) super(VcnsDriverTaskManagerTestCase, self).tearDown() def _test_task_manager_task_process_state(self, sync_exec=False): def _task_failed(task, reason): task.userdata['result'] = False task.userdata['error'] = reason def 
_check_state(task, exp_state): if not task.userdata.get('result', True): return False state = task.userdata['state'] if state != exp_state: msg = "state %d expect %d" % ( state, exp_state) _task_failed(task, msg) return False task.userdata['state'] = state + 1 return True def _exec(task): if not _check_state(task, 1): return ts_const.TaskStatus.ERROR if task.userdata['sync_exec']: return ts_const.TaskStatus.COMPLETED else: return ts_const.TaskStatus.PENDING def _status(task): if task.userdata['sync_exec']: _task_failed(task, "_status callback triggered") state = task.userdata['state'] if state == 3: _check_state(task, 3) return ts_const.TaskStatus.PENDING else: _check_state(task, 4) return ts_const.TaskStatus.COMPLETED def _result(task): if task.userdata['sync_exec']: exp_state = 3 else: exp_state = 5 _check_state(task, exp_state) def _start_monitor(task): _check_state(task, 0) def _executed_monitor(task): _check_state(task, 2) def _result_monitor(task): if task.userdata['sync_exec']: exp_state = 4 else: exp_state = 6 if _check_state(task, exp_state): task.userdata['result'] = True else: task.userdata['result'] = False userdata = { 'state': 0, 'sync_exec': sync_exec } task = ts.Task('name', 'res', _exec, _status, _result, userdata) task.add_start_monitor(_start_monitor) task.add_executed_monitor(_executed_monitor) task.add_result_monitor(_result_monitor) self.manager.add(task) task.wait(ts_const.TaskState.RESULT) self.assertTrue(userdata['result']) def test_task_manager_task_sync_exec_process_state(self): self._test_task_manager_task_process_state(sync_exec=True) def test_task_manager_task_async_exec_process_state(self): self._test_task_manager_task_process_state(sync_exec=False) def test_task_manager_task_ordered_process(self): def _task_failed(task, reason): task.userdata['result'] = False task.userdata['error'] = reason def _exec(task): task.userdata['executed'] = True return ts_const.TaskStatus.PENDING def _status(task): return ts_const.TaskStatus.COMPLETED def 
_result(task): next_task = task.userdata.get('next') if next_task: if next_task.userdata.get('executed'): _task_failed(next_task, "executed premature") if task.userdata.get('result', True): task.userdata['result'] = True tasks = [] prev = None last_task = None for i in range(5): name = "name-%d" % i task = ts.Task(name, 'res', _exec, _status, _result, {}) tasks.append(task) if prev: prev.userdata['next'] = task prev = task last_task = task for task in tasks: self.manager.add(task) last_task.wait(ts_const.TaskState.RESULT) for task in tasks: self.assertTrue(task.userdata['result']) def test_task_manager_task_parallel_process(self): tasks = [] def _exec(task): task.userdata['executed'] = True return ts_const.TaskStatus.PENDING def _status(task): for t in tasks: if not t.userdata.get('executed'): t.userdata['resut'] = False return ts_const.TaskStatus.COMPLETED def _result(task): if (task.userdata.get('result') is None and task.status == ts_const.TaskStatus.COMPLETED): task.userdata['result'] = True else: task.userdata['result'] = False for i in range(5): name = "name-%d" % i res = 'resource-%d' % i task = ts.Task(name, res, _exec, _status, _result, {}) tasks.append(task) self.manager.add(task) for task in tasks: task.wait(ts_const.TaskState.RESULT) self.assertTrue(task.userdata['result']) def _test_task_manager_stop(self, exec_wait=False, result_wait=False, stop_wait=0): def _exec(task): if exec_wait: greenthread.sleep(0.01) return ts_const.TaskStatus.PENDING def _status(task): greenthread.sleep(0.01) return ts_const.TaskStatus.PENDING def _result(task): if result_wait: greenthread.sleep(0) manager = ts.TaskManager().start(100) manager.stop() # Task manager should not leave running threads around # if _thread is None it means it was killed in stop() self.assertIsNone(manager._thread) manager.start(100) alltasks = {} for i in range(100): res = 'res-%d' % i tasks = [] for i in range(100): task = ts.Task('name', res, _exec, _status, _result) manager.add(task) 
tasks.append(task) alltasks[res] = tasks greenthread.sleep(stop_wait) manager.stop() # Task manager should not leave running threads around # if _thread is None it means it was killed in stop() self.assertIsNone(manager._thread) for res, tasks in six.iteritems(alltasks): for task in tasks: self.assertEqual(ts_const.TaskStatus.ABORT, task.status) def test_task_manager_stop_1(self): self._test_task_manager_stop(True, True, 0) def test_task_manager_stop_2(self): self._test_task_manager_stop(True, True, 1) def test_task_manager_stop_3(self): self._test_task_manager_stop(False, False, 0) def test_task_manager_stop_4(self): self._test_task_manager_stop(False, False, 1) def test_task_pending_task(self): def _exec(task): task.userdata['executing'] = True while not task.userdata['tested']: greenthread.sleep(0) task.userdata['executing'] = False return ts_const.TaskStatus.COMPLETED userdata = { 'executing': False, 'tested': False } manager = ts.TaskManager().start(100) task = ts.Task('name', 'res', _exec, userdata=userdata) manager.add(task) while not userdata['executing']: greenthread.sleep(0) self.assertTrue(manager.has_pending_task()) userdata['tested'] = True while userdata['executing']: greenthread.sleep(0) self.assertFalse(manager.has_pending_task()) class VcnsDriverTestCase(base.BaseTestCase): def vcns_patch(self): instance = self.mock_vcns.start() instance.return_value.deploy_edge.side_effect = self.fc.deploy_edge instance.return_value.get_edge_id.side_effect = self.fc.get_edge_id instance.return_value.get_edge_deploy_status.side_effect = ( self.fc.get_edge_deploy_status) instance.return_value.delete_edge.side_effect = self.fc.delete_edge instance.return_value.update_interface.side_effect = ( self.fc.update_interface) instance.return_value.get_nat_config.side_effect = ( self.fc.get_nat_config) instance.return_value.update_nat_config.side_effect = ( self.fc.update_nat_config) instance.return_value.delete_nat_rule.side_effect = ( self.fc.delete_nat_rule) 
instance.return_value.get_edge_status.side_effect = ( self.fc.get_edge_status) instance.return_value.get_edges.side_effect = self.fc.get_edges instance.return_value.update_routes.side_effect = ( self.fc.update_routes) instance.return_value.create_lswitch.side_effect = ( self.fc.create_lswitch) instance.return_value.delete_lswitch.side_effect = ( self.fc.delete_lswitch) def setUp(self): super(VcnsDriverTestCase, self).setUp() self.ctx = neutron_context.get_admin_context() self.temp_e_drv_nsxv_db = e_drv.nsxv_db e_drv.nsxv_db = mock.MagicMock() self.config_parse(args=['--config-file', VCNS_CONFIG_FILE]) self.fc = fake_vcns.FakeVcns() self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) self.vcns_patch() self.addCleanup(self.fc.reset_all) self.vcns_driver = vcns_driver.VcnsDriver(self) self.az = (nsx_az.NsxVAvailabilityZones(). get_default_availability_zone()) self.edge_id = None self.result = None def tearDown(self): e_drv.nsxv_db = self.temp_e_drv_nsxv_db self.vcns_driver.task_manager.stop() # Task manager should not leave running threads around # if _thread is None it means it was killed in stop() self.assertIsNone(self.vcns_driver.task_manager._thread) super(VcnsDriverTestCase, self).tearDown() def complete_edge_creation( self, context, edge_id, name, router_id, dist, deploy_successful, availability_zone=None, deploy_metadata=False): pass def _deploy_edge(self): self.edge_id = self.vcns_driver.deploy_edge( self.ctx, 'router-id', 'myedge', 'internal-network', availability_zone=self.az) self.assertEqual('edge-1', self.edge_id) def test_deploy_edge_with(self): self.vcns_driver.deploy_edge( self.ctx, 'router-id', 'myedge', 'internal-network', availability_zone=self.az) status = self.vcns_driver.get_edge_status('edge-1') self.assertEqual(vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE, status) def test_deploy_edge_fail(self): self.vcns_driver.deploy_edge( self.ctx, 'router-1', 'myedge', 'internal-network', availability_zone=self.az) # self.vcns_driver.deploy_edge( 
# self.ctx, 'router-2', 'myedge', 'internal-network', # availability_zone=self.az) self.assertRaises( nsxv_exc.NsxPluginException, self.vcns_driver.deploy_edge, self.ctx, 'router-2', 'myedge', 'internal-network', availability_zone=self.az) def test_get_edge_status(self): self._deploy_edge() status = self.vcns_driver.get_edge_status(self.edge_id) self.assertEqual(vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE, status) def test_update_nat_rules(self): self._deploy_edge() snats = [{ 'src': '192.168.1.0/24', 'translated': '10.0.0.1' }, { 'src': '192.168.2.0/24', 'translated': '10.0.0.2' }, { 'src': '192.168.3.0/24', 'translated': '10.0.0.3' } ] dnats = [{ 'dst': '100.0.0.4', 'translated': '192.168.1.1' }, { 'dst': '100.0.0.5', 'translated': '192.168.2.1' } ] result = self.vcns_driver.update_nat_rules(self.edge_id, snats, dnats) self.assertTrue(result) natcfg = self.vcns_driver.get_nat_config(self.edge_id) rules = natcfg['rules']['natRulesDtos'] self.assertEqual(2 * len(dnats) + len(snats), len(rules)) self.natEquals(rules[0], dnats[0]) self.natEquals(rules[1], self.snat_for_dnat(dnats[0])) self.natEquals(rules[2], dnats[1]) self.natEquals(rules[3], self.snat_for_dnat(dnats[1])) self.natEquals(rules[4], snats[0]) self.natEquals(rules[5], snats[1]) self.natEquals(rules[6], snats[2]) def test_update_nat_rules_for_all_vnics(self): self._deploy_edge() snats = [{ 'src': '192.168.1.0/24', 'translated': '10.0.0.1' }, { 'src': '192.168.2.0/24', 'translated': '10.0.0.2' }, { 'src': '192.168.3.0/24', 'translated': '10.0.0.3' } ] dnats = [{ 'dst': '100.0.0.4', 'translated': '192.168.1.1' }, { 'dst': '100.0.0.5', 'translated': '192.168.2.1' } ] indices = [0, 1, 2, 3] result = self.vcns_driver.update_nat_rules(self.edge_id, snats, dnats, indices) self.assertTrue(result) natcfg = self.vcns_driver.get_nat_config(self.edge_id) rules = natcfg['rules']['natRulesDtos'] self.assertEqual(2 * len(indices) * len(dnats) + len(indices) * len(snats), len(rules)) sorted_rules = sorted(rules, 
key=lambda k: k['vnic']) for i in range(0, len(sorted_rules), 7): self.natEquals(sorted_rules[i], dnats[0]) self.natEquals(sorted_rules[i + 1], self.snat_for_dnat(dnats[0])) self.natEquals(sorted_rules[i + 2], dnats[1]) self.natEquals(sorted_rules[i + 3], self.snat_for_dnat(dnats[1])) self.natEquals(sorted_rules[i + 4], snats[0]) self.natEquals(sorted_rules[i + 5], snats[1]) self.natEquals(sorted_rules[i + 6], snats[2]) def test_update_nat_rules_for_specific_vnics(self): self._deploy_edge() snats = [{ 'src': '192.168.1.0/24', 'translated': '10.0.0.1', 'vnic_index': 5 }, { 'src': '192.168.2.0/24', 'translated': '10.0.0.2' }, { 'src': '192.168.3.0/24', 'translated': '10.0.0.3' } ] dnats = [{ 'dst': '100.0.0.4', 'translated': '192.168.1.1', 'vnic_index': 2 }, { 'dst': '100.0.0.5', 'translated': '192.168.2.1' } ] result = self.vcns_driver.update_nat_rules(self.edge_id, snats, dnats) self.assertTrue(result) natcfg = self.vcns_driver.get_nat_config(self.edge_id) rules = natcfg['rules']['natRulesDtos'] self.assertEqual(2 * len(dnats) + len(snats), len(rules)) self.natEquals(rules[0], dnats[0]) self.assertEqual(2, rules[0]['vnic']) self.natEquals(rules[1], self.snat_for_dnat(dnats[0])) self.assertEqual(2, rules[1]['vnic']) self.natEquals(rules[2], dnats[1]) self.assertNotIn('vnic', rules[2]) self.natEquals(rules[3], self.snat_for_dnat(dnats[1])) self.assertNotIn('vnic', rules[3]) self.natEquals(rules[4], snats[0]) self.assertEqual(5, rules[4]['vnic']) self.natEquals(rules[5], snats[1]) self.assertNotIn('vnic', rules[5]) self.natEquals(rules[6], snats[2]) self.assertNotIn('vnic', rules[6]) def snat_for_dnat(self, dnat): return { 'src': dnat['translated'], 'translated': dnat['dst'] } def natEquals(self, rule, exp): addr = exp.get('src') if not addr: addr = exp.get('dst') self.assertEqual(addr, rule['originalAddress']) self.assertEqual(exp['translated'], rule['translatedAddress']) def test_update_routes(self): self._deploy_edge() routes = [{ 'cidr': '192.168.1.0/24', 
'nexthop': '169.254.2.1' }, { 'cidr': '192.168.2.0/24', 'nexthop': '169.254.2.1' }, { 'cidr': '192.168.3.0/24', 'nexthop': '169.254.2.1' } ] result = self.vcns_driver.update_routes( self.edge_id, '10.0.0.1', routes) self.assertTrue(result) def test_update_interface(self): self._deploy_edge() self.vcns_driver.update_interface( 'router-id', self.edge_id, vcns_const.EXTERNAL_VNIC_INDEX, 'network-id', address='100.0.0.3', netmask='255.255.255.0') def test_delete_edge(self): self._deploy_edge() result = self.vcns_driver.delete_edge( self.ctx, 'router-id', self.edge_id) self.assertTrue(result) def test_create_lswitch(self): tz_config = [{ 'transport_zone_uuid': 'tz-uuid' }] lswitch = self.vcns_driver.create_lswitch('lswitch', tz_config) self.assertEqual('lswitch', lswitch['display_name']) self.assertEqual('LogicalSwitchConfig', lswitch['type']) self.assertIn('uuid', lswitch) def test_delete_lswitch(self): tz_config = { 'transport_zone_uuid': 'tz-uuid' } lswitch = self.vcns_driver.create_lswitch('lswitch', tz_config) self.vcns_driver.delete_lswitch(lswitch['uuid']) class VcnsDriverHATestCase(VcnsDriverTestCase): def setUp(self): # add edge_ha and ha_datastore to the pre-defined configuration self._data_store = 'fake-datastore' self._ha_data_store = 'fake-datastore-2' cfg.CONF.set_override('ha_datastore_id', self._ha_data_store, group="nsxv") cfg.CONF.set_override('edge_ha', True, group="nsxv") super(VcnsDriverHATestCase, self).setUp() self.vcns_driver.vcns.orig_deploy = self.vcns_driver.vcns.deploy_edge self.vcns_driver.vcns.deploy_edge = self._fake_deploy_edge def _fake_deploy_edge(self, request): # validate the appliance structure in the request, # and return the regular (fake) response found_app = request['appliances']['appliances'] self.assertEqual(2, len(found_app)) self.assertEqual(self._data_store, found_app[0]['datastoreId']) self.assertEqual(self._ha_data_store, found_app[1]['datastoreId']) return self.vcns_driver.vcns.orig_deploy(request) 
vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/vshield/test_edge_utils.py0000666000175100017510000011462313244523345026606 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from neutron_lib import constants from neutron_lib import context from oslo_config import cfg from oslo_utils import uuidutils from six import moves from neutron.tests.unit import testlib_api from neutron_lib import exceptions as n_exc from vmware_nsx.common import config as conf from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.tests import unit as vmware _uuid = uuidutils.generate_uuid #Four types of backup edge with different status EDGE_AVAIL = 'available-' EDGE_CREATING = 'creating-' EDGE_ERROR1 = 'error1-' EDGE_ERROR2 = 'error2-' EDGE_DELETING = 'deleting-' DEFAULT_AZ = 'default' class EdgeUtilsTestCaseMixin(testlib_api.SqlTestCase): def setUp(self): super(EdgeUtilsTestCaseMixin, self).setUp() nsxv_manager_p = mock.patch(vmware.VCNS_DRIVER_NAME, autospec=True) self.nsxv_manager = nsxv_manager_p.start() task = mock.Mock() nsxv_manager_p.return_value = task self.nsxv_manager.callbacks = mock.Mock() self.nsxv_manager.vcns = mock.Mock() get_ver = 
mock.patch.object(self.nsxv_manager.vcns, 'get_version').start() get_ver.return_value = '6.1.4' self.ctx = context.get_admin_context() self.addCleanup(nsxv_manager_p.stop) self.az = (nsx_az.NsxVAvailabilityZones(). get_default_availability_zone()) def _create_router(self, name='router1'): return {'name': name, 'id': _uuid()} def _create_network(self, name='network'): return {'name': name, 'id': _uuid()} def _create_subnet(self, name='subnet'): return {'name': name, 'id': _uuid()} def _populate_vcns_router_binding(self, bindings): for binding in bindings: nsxv_db.init_edge_vnic_binding(self.ctx.session, binding['edge_id']) nsxv_db.add_nsxv_router_binding( self.ctx.session, binding['router_id'], binding['edge_id'], None, binding['status'], appliance_size=binding['appliance_size'], edge_type=binding['edge_type'], availability_zone=binding['availability_zone']) class DummyPlugin(object): def get_network_az_by_net_id(self, context, network_id): return (nsx_az.NsxVAvailabilityZones(). get_default_availability_zone()) class EdgeDHCPManagerTestCase(EdgeUtilsTestCaseMixin): def setUp(self): super(EdgeDHCPManagerTestCase, self).setUp() self.edge_manager = edge_utils.EdgeManager(self.nsxv_manager, None) self.check = mock.patch.object(self.edge_manager, 'check_edge_active_at_backend').start() self.check.return_value = True def test_create_dhcp_edge_service(self): fake_edge_pool = [{'status': constants.ACTIVE, 'edge_id': 'edge-1', 'router_id': 'backup-11111111-1111', 'appliance_size': 'compact', 'edge_type': 'service', 'availability_zone': DEFAULT_AZ}, {'status': constants.PENDING_DELETE, 'edge_id': 'edge-2', 'router_id': 'dhcp-22222222-2222', 'appliance_size': 'compact', 'edge_type': 'service', 'availability_zone': DEFAULT_AZ}, {'status': constants.PENDING_DELETE, 'edge_id': 'edge-3', 'router_id': 'backup-33333333-3333', 'appliance_size': 'compact', 'edge_type': 'service', 'availability_zone': DEFAULT_AZ}] self._populate_vcns_router_binding(fake_edge_pool) fake_network = 
self._create_network() fake_subnet = self._create_subnet(fake_network['id']) self.edge_manager.plugin = DummyPlugin() with mock.patch.object(self.edge_manager, '_get_used_edges', return_value=([], [])): self.edge_manager.create_dhcp_edge_service(self.ctx, fake_network['id'], fake_subnet) self.nsxv_manager.rename_edge.assert_called_once_with('edge-1', mock.ANY) def test_get_random_available_edge(self): available_edge_ids = ['edge-1', 'edge-2'] selected_edge_id = self.edge_manager._get_random_available_edge( available_edge_ids) self.assertIn(selected_edge_id, available_edge_ids) def test_get_random_available_edge_missing_edges_returns_none(self): available_edge_ids = ['edge-1', 'edge-2'] # Always return inactive(False) while checking whether the edge # exists on the backend. with mock.patch.object(self.edge_manager, 'check_edge_active_at_backend', return_value=False): selected_edge_id = self.edge_manager._get_random_available_edge( available_edge_ids) # If no active edges are found on the backend, return None so that # a new DHCP edge is created. 
self.assertIsNone(selected_edge_id) class EdgeUtilsTestCase(EdgeUtilsTestCaseMixin): def setUp(self): super(EdgeUtilsTestCase, self).setUp() self.edge_manager = edge_utils.EdgeManager(self.nsxv_manager, None) # Args for vcns interface configuration self.internal_ip = '10.0.0.1' self.uplink_ip = '192.168.111.30' self.subnet_mask = '255.255.255.0' self.pref_len = '24' self.edge_id = 'dummy' self.orig_vnics = ({}, {'vnics': [ {'addressGroups': {'addressGroups': [ {'subnetMask': self.subnet_mask, 'subnetPrefixLength': self.pref_len, 'primaryAddress': self.uplink_ip}]}, 'type': 'uplink', 'index': 1}, {'addressGroups': {'addressGroups': [ {'subnetMask': self.subnet_mask, 'subnetPrefixLength': self.pref_len, 'primaryAddress': self.internal_ip}]}, 'type': 'internal', 'index': 2}]} ) # Args for vcns vdr interface configuration self.vdr_ip = '10.0.0.1' self.vnic = 1 self.orig_vdr = ({}, {'index': 2, 'addressGroups': {'addressGroups': [{'subnetMask': self.subnet_mask, 'subnetPrefixLength': self.pref_len, 'primaryAddress': self.vdr_ip}]}, 'type': 'internal'}) def test_create_lrouter(self): lrouter = self._create_router() self.nsxv_manager.deploy_edge.reset_mock() edge_utils.create_lrouter(self.nsxv_manager, self.ctx, lrouter, lswitch=None, dist=False, availability_zone=self.az) self.nsxv_manager.deploy_edge.assert_called_once_with(self.ctx, lrouter['id'], (lrouter['name'] + '-' + lrouter['id']), internal_network=None, dist=False, availability_zone=self.az, appliance_size=vcns_const.SERVICE_SIZE_MAPPING['router']) def _test_update_intereface_primary_addr(self, old_ip, new_ip, isUplink): fixed_vnic = {'addressGroups': {'addressGroups': [ {'subnetMask': self.subnet_mask, 'subnetPrefixLength': self.pref_len, 'primaryAddress': new_ip}] if new_ip else []}, 'type': 'uplink' if isUplink else 'internal', 'index': 1 if isUplink else 2} with mock.patch.object(self.nsxv_manager.vcns, 'get_interfaces', return_value=self.orig_vnics): self.edge_manager.update_interface_addr( self.ctx, 
self.edge_id, old_ip, new_ip, self.subnet_mask, is_uplink=isUplink) self.nsxv_manager.vcns.update_interface.assert_called_once_with( self.edge_id, fixed_vnic) def test_update_interface_addr_intrernal(self): self._test_update_intereface_primary_addr( self.internal_ip, '10.0.0.2', False) def test_remove_interface_primary_addr_intrernal(self): self._test_update_intereface_primary_addr( self.internal_ip, None, False) def test_update_interface_addr_uplink(self): self._test_update_intereface_primary_addr( self.uplink_ip, '192.168.111.31', True) def test_remove_interface_primary_addr_uplink(self): self._test_update_intereface_primary_addr( self.uplink_ip, None, True) def _test_update_intereface_secondary_addr(self, old_ip, new_ip): addr_group = {'subnetMask': self.subnet_mask, 'subnetPrefixLength': self.pref_len, 'primaryAddress': self.uplink_ip, 'secondaryAddresses': {'type': 'secondary_addresses', 'ipAddress': [new_ip]}} fixed_vnic = {'addressGroups': {'addressGroups': [addr_group]}, 'type': 'uplink', 'index': 1} with mock.patch.object(self.nsxv_manager.vcns, 'get_interfaces', return_value=self.orig_vnics): self.edge_manager.update_interface_addr( self.ctx, self.edge_id, old_ip, new_ip, self.subnet_mask, is_uplink=True) self.nsxv_manager.vcns.update_interface.assert_called_once_with( self.edge_id, fixed_vnic) def test_add_secondary_interface_addr(self): self._test_update_intereface_secondary_addr( None, '192.168.111.31') def test_update_interface_addr_fail(self): # Old ip is not configured on the interface, so we should fail old_ip = '192.168.111.32' new_ip = '192.168.111.31' with mock.patch.object(self.nsxv_manager.vcns, 'get_interfaces', return_value=self.orig_vnics): self.assertRaises( nsx_exc.NsxPluginException, self.edge_manager.update_interface_addr, self.ctx, self.edge_id, old_ip, new_ip, self.subnet_mask, is_uplink=True) def _test_update_vdr_intereface_primary_addr(self, old_ip, new_ip): fixed_vnic = {'addressGroups': {'addressGroups': [ {'subnetMask': 
self.subnet_mask, 'subnetPrefixLength': self.pref_len, 'primaryAddress': new_ip}] if new_ip else []}, 'type': 'internal', 'index': 2} with mock.patch.object(self.nsxv_manager.vcns, 'get_vdr_internal_interface', return_value=self.orig_vdr): with mock.patch.object(self.nsxv_manager.vcns, 'update_vdr_internal_interface') as vcns_update: self.edge_manager.update_vdr_interface_addr( self.ctx, self.edge_id, self.vnic, old_ip, new_ip, self.subnet_mask) vcns_update.assert_called_once_with(self.edge_id, self.vnic, {'interface': fixed_vnic}) def test_update_vdr_interface_addr_intrernal(self): self._test_update_vdr_intereface_primary_addr( self.vdr_ip, '20.0.0.2') def test_remove_vdr_interface_primary_addr_intrernal(self): self._test_update_vdr_intereface_primary_addr( self.vdr_ip, None) def test_update_vdr_interface_addr_fail(self): # Old ip is not configured on the vdr interface, so we should fail old_ip = '192.168.111.32' new_ip = '192.168.111.31' with mock.patch.object(self.nsxv_manager.vcns, 'get_vdr_internal_interface', return_value=self.orig_vdr): self.assertRaises( nsx_exc.NsxPluginException, self.edge_manager.update_vdr_interface_addr, self.ctx, self.edge_id, self.vnic, old_ip, new_ip, self.subnet_mask) class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): def setUp(self): super(EdgeManagerTestCase, self).setUp() cfg.CONF.set_override('backup_edge_pool', [], 'nsxv') self.edge_manager = edge_utils.EdgeManager(self.nsxv_manager, None) self.check = mock.patch.object(self.edge_manager, 'check_edge_active_at_backend').start() self.check.side_effect = self.check_edge_active_at_backend self.default_edge_pool_dicts = {'default': { nsxv_constants.SERVICE_EDGE: { nsxv_constants.LARGE: {'minimum_pooled_edges': 1, 'maximum_pooled_edges': 3}, nsxv_constants.COMPACT: {'minimum_pooled_edges': 1, 'maximum_pooled_edges': 3}}, nsxv_constants.VDR_EDGE: {}}} self.vdr_edge_pool_dicts = {'default': { nsxv_constants.SERVICE_EDGE: {}, nsxv_constants.VDR_EDGE: { nsxv_constants.LARGE: 
{'minimum_pooled_edges': 1, 'maximum_pooled_edges': 3}}}} def check_edge_active_at_backend(self, edge_id): # workaround to let edge_id None pass since we wrapped router binding # db update op. if edge_id is None: edge_id = "" return not (edge_id.startswith(EDGE_ERROR1) or edge_id.startswith(EDGE_ERROR2)) def test_backup_edge_pool_with_default(self): cfg.CONF.set_override('backup_edge_pool', ['service:large:1:3', 'service:compact:1:3'], 'nsxv') az = nsx_az.NsxVAvailabilityZone(None) edge_pool_dicts = edge_utils.parse_backup_edge_pool_opt_per_az(az) self.assertEqual(self.default_edge_pool_dicts['default'], edge_pool_dicts) def test_backup_edge_pool_with_empty_conf(self): cfg.CONF.set_override('backup_edge_pool', [], 'nsxv') az = nsx_az.NsxVAvailabilityZone(None) edge_pool_dicts = edge_utils.parse_backup_edge_pool_opt_per_az(az) expect_edge_pool_dicts = { nsxv_constants.SERVICE_EDGE: {}, nsxv_constants.VDR_EDGE: {}} self.assertEqual(expect_edge_pool_dicts, edge_pool_dicts) def test_backup_edge_pool_with_vdr_conf(self): cfg.CONF.set_override('backup_edge_pool', ['vdr:large:1:3'], 'nsxv') az = nsx_az.NsxVAvailabilityZone(None) edge_pool_dicts = edge_utils.parse_backup_edge_pool_opt_per_az(az) expect_edge_pool_dicts = self.vdr_edge_pool_dicts['default'] self.assertEqual(expect_edge_pool_dicts, edge_pool_dicts) def test_backup_edge_pool_with_duplicate_conf(self): cfg.CONF.set_override('backup_edge_pool', ['service:compact:1:3', 'service::3:4'], 'nsxv') az = nsx_az.NsxVAvailabilityZone(None) self.assertRaises(n_exc.Invalid, edge_utils.parse_backup_edge_pool_opt_per_az, az) def _create_router_bindings(self, num, status, id_prefix, size, edge_type, availability_zone): if not availability_zone: availability_zone = self.az return [{'status': status, 'edge_id': id_prefix + '-edge-' + str(i), 'router_id': (vcns_const.BACKUP_ROUTER_PREFIX + id_prefix + str(i)), 'appliance_size': size, 'edge_type': edge_type, 'availability_zone': availability_zone.name} for i in moves.range(num)] 
def _create_available_router_bindings( self, num, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): status = constants.ACTIVE id_prefix = EDGE_AVAIL + size + '-' + edge_type return self._create_router_bindings( num, status, id_prefix, size, edge_type, availability_zone) def _create_creating_router_bindings( self, num, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): status = constants.PENDING_CREATE id_prefix = EDGE_CREATING + size + '-' + edge_type return self._create_router_bindings( num, status, id_prefix, size, edge_type, availability_zone) def _create_error_router_bindings( self, num, status=constants.ERROR, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): id_prefix = EDGE_ERROR1 + size + '-' + edge_type return self._create_router_bindings( num, status, id_prefix, size, edge_type, availability_zone) def _create_error_router_bindings_at_backend( self, num, status=constants.ACTIVE, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): id_prefix = EDGE_ERROR2 + size + '-' + edge_type return self._create_router_bindings( num, status, id_prefix, size, edge_type, availability_zone) def _create_deleting_router_bindings( self, num, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): status = constants.PENDING_DELETE id_prefix = EDGE_DELETING + size + '-' + edge_type return self._create_router_bindings( num, status, id_prefix, size, edge_type, availability_zone) def _create_edge_pools(self, avail, creating, error, error_at_backend, deleting, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE): """Create a backup edge pool with different status of edges. Backup edges would be edges with avail, creating and error_at_backend, while available edges would only be edges with avail status. 
""" availability_zone = self.az return ( self._create_error_router_bindings( error, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_deleting_router_bindings( deleting, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_error_router_bindings_at_backend( error_at_backend, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_creating_router_bindings( creating, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_available_router_bindings( avail, size=size, edge_type=edge_type, availability_zone=availability_zone)) def _create_backup_router_bindings( self, avail, creating, error, error_at_backend, deleting, error_status=constants.PENDING_DELETE, error_at_backend_status=constants.PENDING_DELETE, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): if not availability_zone: availability_zone = self.az return ( self._create_error_router_bindings( error, status=error_status, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_error_router_bindings_at_backend( error_at_backend, status=error_at_backend_status, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_creating_router_bindings( creating, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_available_router_bindings( avail, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_deleting_router_bindings( deleting, size=size, edge_type=edge_type, availability_zone=availability_zone)) def _verify_router_bindings(self, exp_bindings, act_db_bindings): exp_dict = dict(zip([binding['router_id'] for binding in exp_bindings], exp_bindings)) act_bindings = [{'router_id': binding['router_id'], 'edge_id': binding['edge_id'], 'status': binding['status'], 'appliance_size': binding['appliance_size'], 'edge_type': binding['edge_type'], 
'availability_zone': binding['availability_zone']} for binding in act_db_bindings] act_dict = dict(zip([binding['router_id'] for binding in act_bindings], act_bindings)) self.assertEqual(exp_dict, act_dict) def test_get_backup_edge_bindings(self): """Test get backup edges filtering out deleting and error edges.""" pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT)) self._populate_vcns_router_binding(pool_edges) expect_backup_bindings = self._create_backup_router_bindings( 1, 2, 0, 4, 0, error_at_backend_status=constants.ACTIVE, size=nsxv_constants.LARGE) backup_bindings = self.edge_manager._get_backup_edge_bindings(self.ctx, appliance_size=nsxv_constants.LARGE, availability_zone=self.az) self._verify_router_bindings(expect_backup_bindings, backup_bindings) def test_get_available_router_bindings(self): appliance_size = nsxv_constants.LARGE edge_type = nsxv_constants.SERVICE_EDGE pool_edges = (self._create_edge_pools(1, 2, 3, 0, 5) + self._create_edge_pools( 1, 2, 3, 0, 5, size=nsxv_constants.COMPACT)) self._populate_vcns_router_binding(pool_edges) expect_backup_bindings = self._create_backup_router_bindings( 1, 2, 3, 0, 5, error_status=constants.ERROR) binding = self.edge_manager._get_available_router_binding( self.ctx, appliance_size=appliance_size, edge_type=edge_type, availability_zone=self.az) router_bindings = [ binding_db for binding_db in nsxv_db.get_nsxv_router_bindings( self.ctx.session) if (binding_db['appliance_size'] == appliance_size and binding_db['edge_type'] == edge_type and binding_db['availability_zone'] == 'default')] self._verify_router_bindings(expect_backup_bindings, router_bindings) edge_id = (EDGE_AVAIL + appliance_size + '-' + edge_type + '-edge-' + str(0)) self.assertEqual(edge_id, binding['edge_id']) def test_check_backup_edge_pool_with_max(self): appliance_size = nsxv_constants.LARGE edge_type = nsxv_constants.SERVICE_EDGE pool_edges = (self._create_edge_pools(1, 2, 
3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT)) self._populate_vcns_router_binding(pool_edges) expect_pool_bindings = self._create_backup_router_bindings( 1, 2, 3, 4, 5, error_status=constants.ERROR, error_at_backend_status=constants.PENDING_DELETE) self.edge_manager._check_backup_edge_pool( 0, 3, appliance_size=appliance_size, edge_type=edge_type, availability_zone=self.az) router_bindings = [ binding for binding in nsxv_db.get_nsxv_router_bindings(self.ctx.session) if (binding['appliance_size'] == appliance_size and binding['edge_type'] == edge_type)] self._verify_router_bindings(expect_pool_bindings, router_bindings) def test_check_backup_edge_pool_with_min(self): appliance_size = nsxv_constants.LARGE edge_type = nsxv_constants.SERVICE_EDGE pool_edges = (self._create_edge_pools(1, 2, 3, 0, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT)) self._populate_vcns_router_binding(pool_edges) edge_utils.eventlet = mock.Mock() edge_utils.eventlet.spawn_n.return_value = None self.edge_manager._check_backup_edge_pool( 5, 10, appliance_size=appliance_size, edge_type=edge_type, availability_zone=self.az) router_bindings = [ binding for binding in nsxv_db.get_nsxv_router_bindings(self.ctx.session) if binding['edge_id'] is None and binding['status'] == constants.PENDING_CREATE] binding_ids = [bind.router_id for bind in router_bindings] self.assertEqual(2, len(router_bindings)) edge_utils.eventlet.spawn_n.assert_called_with( mock.ANY, binding_ids, appliance_size, edge_type, self.az) def test_check_backup_edge_pools_with_empty_conf(self): pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + self._create_edge_pools( 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) self._populate_vcns_router_binding(pool_edges) self.edge_manager._check_backup_edge_pools() router_bindings = nsxv_db.get_nsxv_router_bindings(self.ctx.session) for binding in 
router_bindings: self.assertEqual(constants.PENDING_DELETE, binding['status']) def test_check_backup_edge_pools_with_default(self): self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + self._create_edge_pools( 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) self._populate_vcns_router_binding(pool_edges) self.edge_manager._check_backup_edge_pools() router_bindings = nsxv_db.get_nsxv_router_bindings(self.ctx.session) expect_large_bindings = self._create_backup_router_bindings( 1, 2, 3, 4, 5, error_status=constants.PENDING_DELETE, error_at_backend_status=constants.PENDING_DELETE) large_bindings = [ binding for binding in router_bindings if (binding['appliance_size'] == nsxv_constants.LARGE and binding['edge_type'] == nsxv_constants.SERVICE_EDGE)] self._verify_router_bindings(expect_large_bindings, large_bindings) expect_compact_bindings = self._create_backup_router_bindings( 1, 2, 3, 4, 5, error_status=constants.PENDING_DELETE, error_at_backend_status=constants.PENDING_DELETE, size=nsxv_constants.COMPACT) compact_bindings = [ binding for binding in router_bindings if (binding['appliance_size'] == nsxv_constants.COMPACT and binding['edge_type'] == nsxv_constants.SERVICE_EDGE)] self._verify_router_bindings(expect_compact_bindings, compact_bindings) vdr_bindings = [ binding for binding in router_bindings if (binding['appliance_size'] == nsxv_constants.LARGE and binding['edge_type'] == nsxv_constants.VDR_EDGE)] for binding in vdr_bindings: self.assertEqual(constants.PENDING_DELETE, binding['status']) def test_check_backup_edge_pools_with_vdr(self): self.edge_manager.edge_pool_dicts = self.vdr_edge_pool_dicts pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + self._create_edge_pools( 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) 
self._populate_vcns_router_binding(pool_edges) self.edge_manager._check_backup_edge_pools() router_bindings = nsxv_db.get_nsxv_router_bindings(self.ctx.session) expect_vdr_bindings = self._create_backup_router_bindings( 1, 2, 3, 4, 5, error_status=constants.PENDING_DELETE, error_at_backend_status=constants.PENDING_DELETE, edge_type=nsxv_constants.VDR_EDGE) vdr_bindings = [ binding for binding in router_bindings if (binding['appliance_size'] == nsxv_constants.LARGE and binding['edge_type'] == nsxv_constants.VDR_EDGE)] self._verify_router_bindings(expect_vdr_bindings, vdr_bindings) service_bindings = [ binding for binding in router_bindings if binding['edge_type'] == nsxv_constants.SERVICE_EDGE] for binding in service_bindings: self.assertEqual(constants.PENDING_DELETE, binding['status']) def test_allocate_edge_appliance_with_empty(self): self.edge_manager._clean_all_error_edge_bindings = mock.Mock() self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', availability_zone=self.az) assert not self.edge_manager._clean_all_error_edge_bindings.called def test_allocate_large_edge_appliance_with_default(self): self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + self._create_edge_pools( 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) self._populate_vcns_router_binding(pool_edges) self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', appliance_size=nsxv_constants.LARGE, availability_zone=self.az) edge_id = (EDGE_AVAIL + nsxv_constants.LARGE + '-' + nsxv_constants.SERVICE_EDGE + '-edge-' + str(0)) self.nsxv_manager.rename_edge.assert_has_calls( [mock.call(edge_id, 'fake_name')]) def test_allocate_compact_edge_appliance_with_default(self): self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 
4, 5, size=nsxv_constants.COMPACT) + self._create_edge_pools( 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) self._populate_vcns_router_binding(pool_edges) self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', appliance_size=nsxv_constants.COMPACT, availability_zone=self.az) edge_id = (EDGE_AVAIL + nsxv_constants.COMPACT + '-' + nsxv_constants.SERVICE_EDGE + '-edge-' + str(0)) self.nsxv_manager.rename_edge.assert_has_calls( [mock.call(edge_id, 'fake_name')]) def test_allocate_large_edge_appliance_with_vdr(self): self.edge_manager.edge_pool_dicts = self.vdr_edge_pool_dicts pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + self._create_edge_pools( 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) self._populate_vcns_router_binding(pool_edges) self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', dist=True, appliance_size=nsxv_constants.LARGE, availability_zone=self.az) edge_id = (EDGE_AVAIL + nsxv_constants.LARGE + '-' + nsxv_constants.VDR_EDGE + '-edge-' + str(0)) self.nsxv_manager.rename_edge.assert_has_calls( [mock.call(edge_id, 'fake_name')]) def test_free_edge_appliance_with_empty(self): self.edge_manager._clean_all_error_edge_bindings = mock.Mock() self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', availability_zone=self.az) self.edge_manager._free_edge_appliance( self.ctx, 'fake_id') assert not self.edge_manager._clean_all_error_edge_bindings.called def test_free_edge_appliance_with_default(self): self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', availability_zone=self.az) self.edge_manager._free_edge_appliance( self.ctx, 'fake_id') assert not self.nsxv_manager.delete_edge.called self.nsxv_manager.update_edge.assert_has_calls( [mock.call(mock.ANY, mock.ANY, mock.ANY, mock.ANY, None, 
appliance_size=nsxv_constants.COMPACT, dist=False, availability_zone=mock.ANY)]) def test_free_edge_appliance_with_default_with_full(self): self.edge_pool_dicts = { nsxv_constants.SERVICE_EDGE: { nsxv_constants.LARGE: {'minimum_pooled_edges': 1, 'maximum_pooled_edges': 1}, nsxv_constants.COMPACT: {'minimum_pooled_edges': 1, 'maximum_pooled_edges': 3}}, nsxv_constants.VDR_EDGE: {}} # Avoid use of eventlet greenpool as this breaks the UT with mock.patch.object(self.edge_manager, '_get_worker_pool'): self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', availability_zone=self.az) self.edge_manager._free_edge_appliance( self.ctx, 'fake_id') class VdrTransitNetUtilDefaultTestCase(EdgeUtilsTestCaseMixin): EXPECTED_NETMASK = '255.255.255.240' EXPECTED_TLR_IP = '169.254.2.1' EXPECTED_PLR_IP = conf.DEFAULT_PLR_ADDRESS def setUp(self): super(VdrTransitNetUtilDefaultTestCase, self).setUp() def test_get_vdr_transit_network_netmask(self): self.assertEqual(edge_utils.get_vdr_transit_network_netmask(), self.EXPECTED_NETMASK) def test_get_vdr_transit_network_tlr_address(self): self.assertEqual(edge_utils.get_vdr_transit_network_tlr_address(), self.EXPECTED_TLR_IP) def test_get_vdr_transit_network_plr_address(self): self.assertEqual(edge_utils.get_vdr_transit_network_plr_address(), self.EXPECTED_PLR_IP) def test_is_overlapping_reserved_subnets(self): self.assertTrue( edge_utils.is_overlapping_reserved_subnets('169.254.1.0/24', ['169.254.0.0/16'])) self.assertTrue( edge_utils.is_overlapping_reserved_subnets('169.254.1.0/24', ['192.168.2.0/24', '169.254.0.0/16'])) self.assertFalse( edge_utils.is_overlapping_reserved_subnets('169.254.1.0/24', ['169.253.0.0/16'])) self.assertFalse( edge_utils.is_overlapping_reserved_subnets('169.254.1.0/24', ['192.168.2.0/24', '169.253.0.0/16'])) class VdrTransitNetUtilTestCase(EdgeUtilsTestCaseMixin): EXPECTED_NETMASK = '255.255.255.0' EXPECTED_TLR_IP = '192.168.1.1' EXPECTED_PLR_IP = '192.168.1.2' def setUp(self): 
super(VdrTransitNetUtilTestCase, self).setUp() class VdrTransitNetValidatorTestCase(EdgeUtilsTestCaseMixin): def setUp(self): super(VdrTransitNetValidatorTestCase, self).setUp() def _test_validator(self, cidr): cfg.CONF.set_override('vdr_transit_network', cidr, 'nsxv') return edge_utils.validate_vdr_transit_network() def test_vdr_transit_net_validator_success(self): self.assertIsNone(self._test_validator('192.168.253.0/24')) def test_vdr_transit_net_validator_junk_cidr(self): self.assertRaises(n_exc.Invalid, self._test_validator, 'not_a_subnet') def test_vdr_transit_net_validator_too_small_cidr(self): self.assertRaises( n_exc.Invalid, self._test_validator, '169.254.2.0/31') def test_vdr_transit_net_validator_overlap_cidr(self): self.assertRaises( n_exc.Invalid, self._test_validator, '169.254.0.0/16') vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/housekeeper/0000775000175100017510000000000013244524600023714 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_dhcp_edge.py0000666000175100017510000006004413244523345030453 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import datetime import mock from neutron.tests import base from neutron_lib.plugins import constants from vmware_nsx.plugins.nsx_v.housekeeper import error_dhcp_edge FAKE_ROUTER_BINDINGS = [ { 'router_id': 'dhcp-16c224dd-7c2b-4241-a447-4fc07a3', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-31341032-6911-4596-8b64-afce92f', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-51c97abb-8ac9-4f24-b914-cc30cf8', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-5d01cea4-58f8-4a16-9be0-11012ca', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-65a5335c-4c72-4721-920e-5abdc9e', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-83bce421-b72c-4744-9285-a0fcc25', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-9d2f5b66-c252-4681-86af-9460484', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-aea44408-0448-42dd-9ae6-ed940da', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}] BAD_ROUTER_BINDING = { 'router_id': 'dhcp-11111111-1111-1111-aaaa-aaaaaaa', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'} FAKE_EDGE_VNIC_BINDS = [ { 'network_id': '7c0b6fb5-d86c-4e5e-a2af-9ce36971764b', 'vnic_index': 1, 'edge_id': 'edge-752', 'tunnel_index': 1}, { 'network_id': '16c224dd-7c2b-4241-a447-4fc07a38dc80', 'vnic_index': 2, 'edge_id': 'edge-752', 'tunnel_index': 4}, { 'network_id': '65a5335c-4c72-4721-920e-5abdc9e09ba4', 'vnic_index': 2, 'edge_id': 'edge-752', 'tunnel_index': 6}, { 'network_id': 'aea44408-0448-42dd-9ae6-ed940dac564a', 'vnic_index': 4, 'edge_id': 'edge-752', 'tunnel_index': 10}, { 'network_id': '5d01cea4-58f8-4a16-9be0-11012cadbf55', 'vnic_index': 4, 'edge_id': 'edge-752', 'tunnel_index': 12}, 
{ 'network_id': '51c97abb-8ac9-4f24-b914-cc30cf8e856a', 'vnic_index': 6, 'edge_id': 'edge-752', 'tunnel_index': 16}, { 'network_id': '31341032-6911-4596-8b64-afce92f46bf4', 'vnic_index': 6, 'edge_id': 'edge-752', 'tunnel_index': 18}, { 'network_id': '9d2f5b66-c252-4681-86af-946048414a1f', 'vnic_index': 8, 'edge_id': 'edge-752', 'tunnel_index': 22}, { 'network_id': '83bce421-b72c-4744-9285-a0fcc25b001a', 'vnic_index': 8, 'edge_id': 'edge-752', 'tunnel_index': 24}] BAD_VNIC_BINDING = { 'network_id': '11111111-1111-1111-aaaa-aaaaaaabbaac', 'vnic_index': 8, 'edge_id': 'edge-752', 'tunnel_index': 21} FAKE_INTERNAL_NETWORKS = [ {'availability_zone': u'default', 'network_id': u'7c0b6fb5-d86c-4e5e-a2af-9ce36971764b', 'network_purpose': 'inter_edge_net', 'updated_at': None, '_rev_bumped': False, 'created_at': datetime.datetime(2017, 12, 13, 12, 28, 18)}] FAKE_NETWORK_RESULTS = [{'id': 'e3a02b46-b9c9-4f2f-bcea-7978355a7dca'}, {'id': '031eaf4b-49b8-4003-9369-8a0dd5d7a163'}, {'id': '16c224dd-7c2b-4241-a447-4fc07a38dc80'}, {'id': '1a3b570c-c8b5-411e-8e13-d4dc0b3e56b2'}, {'id': '24b31d2c-fcec-45e5-bdcb-aa089d3713ae'}, {'id': '31341032-6911-4596-8b64-afce92f46bf4'}, {'id': '51c97abb-8ac9-4f24-b914-cc30cf8e856a'}, {'id': '5484b39b-ec6e-43f4-b900-fc1b2c49c71a'}, {'id': '54eae237-3516-4f82-b46f-f955e91c989c'}, {'id': '5a859fa0-bea0-41be-843a-9f9bf39e2509'}, {'id': '5d01cea4-58f8-4a16-9be0-11012cadbf55'}, {'id': '65a5335c-4c72-4721-920e-5abdc9e09ba4'}, {'id': '708f11d4-00d0-48ea-836f-01273cbf36cc'}, {'id': '7c0b6fb5-d86c-4e5e-a2af-9ce36971764b'}, {'id': '83bce421-b72c-4744-9285-a0fcc25b001a'}, {'id': '9d2f5b66-c252-4681-86af-946048414a1f'}, {'id': 'aea44408-0448-42dd-9ae6-ed940dac564a'}, {'id': 'b0cee4e3-266b-48d3-a651-04f1985fe4b0'}, {'id': 'be82b8c5-96a9-4e08-a965-bb09d48ec161'}, {'id': 'e69279c6-9a1e-4f7b-b421-b8b3eb92c54b'}] BACKEND_EDGE_VNICS = {'vnics': [ {'label': 'vNic_0', 'name': 'external', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'uplink', 
'isConnected': True, 'index': 0, 'portgroupId': 'network-13', 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_1', 'name': 'internal1', 'addressGroups': { 'addressGroups': [ {'primaryAddress': '169.254.128.14', 'secondaryAddresses': { 'type': 'secondary_addresses', 'ipAddress': ['169.254.169.254']}, 'subnetMask': '255.255.128.0', 'subnetPrefixLength': '17'}]}, 'mtu': 1500, 'type': 'internal', 'isConnected': True, 'index': 1, 'portgroupId': 'virtualwire-472', 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_2', 'name': 'internal2', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk', 'subInterfaces': {'subInterfaces': [ {'isConnected': True, 'label': 'vNic_10', 'name': '1639ff40-8137-4803-a29f-dcf0efc35b34', 'index': 10, 'tunnelId': 4, 'logicalSwitchId': 'virtualwire-497', 'logicalSwitchName': '16c224dd-7c2b-4241-a447-4fc07a38dc80', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [{ 'primaryAddress': '10.24.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5025, 'subInterfaceBackingType': 'NETWORK'}, {'isConnected': True, 'label': 'vNic_12', 'name': 'd1515746-a21a-442d-8347-62b36f5791d6', 'index': 12, 'tunnelId': 6, 'logicalSwitchId': 'virtualwire-499', 'logicalSwitchName': '65a5335c-4c72-4721-920e-5abdc9e09ba4', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.26.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5027, 'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True, 'index': 2, 'portgroupId': 'dvportgroup-1550', 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_3', 'name': 'vnic3', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 3, 'fenceParameters': [], 'enableProxyArp': False, 
'enableSendRedirects': True}, {'label': 'vNic_4', 'name': 'internal4', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk', 'subInterfaces': {'subInterfaces': [ {'isConnected': True, 'label': 'vNic_16', 'name': 'e2405dc6-21d7-4421-a70c-3eecf675b286', 'index': 16, 'tunnelId': 10, 'logicalSwitchId': 'virtualwire-503', 'logicalSwitchName': 'aea44408-0448-42dd-9ae6-ed940dac564a', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.30.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5031, 'subInterfaceBackingType': 'NETWORK'}, {'isConnected': True, 'label': 'vNic_18', 'name': 'a10fb348-30e4-477f-817f-bb3c9c9fd3f5', 'index': 18, 'tunnelId': 12, 'logicalSwitchId': 'virtualwire-505', 'logicalSwitchName': '5d01cea4-58f8-4a16-9be0-11012cadbf55', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.32.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5033, 'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True, 'index': 4, 'portgroupId': 'dvportgroup-1559', 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_5', 'name': 'vnic5', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 5, 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_6', 'name': 'internal6', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk', 'subInterfaces': {'subInterfaces': [ {'isConnected': True, 'label': 'vNic_22', 'name': '2da534c8-3d9b-4677-aa14-2e66efd09e3f', 'index': 22, 'tunnelId': 16, 'logicalSwitchId': 'virtualwire-509', 'logicalSwitchName': '51c97abb-8ac9-4f24-b914-cc30cf8e856a', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.36.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 
'virtualNetworkId': 5037, 'subInterfaceBackingType': 'NETWORK'}, {'isConnected': True, 'label': 'vNic_24', 'name': 'd25f00c2-eb82-455c-87b9-d2d510d42917', 'index': 24, 'tunnelId': 18, 'logicalSwitchId': 'virtualwire-511', 'logicalSwitchName': '31341032-6911-4596-8b64-afce92f46bf4', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.38.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5039, 'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True, 'index': 6, 'portgroupId': 'dvportgroup-1567', 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_7', 'name': 'vnic7', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 7, 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_8', 'name': 'internal8', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk', 'subInterfaces': {'subInterfaces': [ {'isConnected': True, 'label': 'vNic_28', 'name': 'cf4cc867-e958-4f86-acea-d8a52a4c26c8', 'index': 28, 'tunnelId': 22, 'logicalSwitchId': 'virtualwire-515', 'logicalSwitchName': '9d2f5b66-c252-4681-86af-946048414a1f', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.42.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5043, 'subInterfaceBackingType': 'NETWORK'}, {'isConnected': True, 'label': 'vNic_30', 'name': 'ceab3d83-3ee2-4372-b5d7-f1d47be76e9d', 'index': 30, 'tunnelId': 24, 'logicalSwitchId': 'virtualwire-517', 'logicalSwitchName': '83bce421-b72c-4744-9285-a0fcc25b001a', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.44.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5045, 'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True, 'index': 8, 
'portgroupId': 'dvportgroup-1575', 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_9', 'name': 'vnic9', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 9, 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}]} BAD_SUBINTERFACE = { 'isConnected': True, 'label': 'vNic_31', 'name': '11111111-2222-3333-4444-555555555555', 'index': 31, 'tunnelId': 25, 'logicalSwitchId': 'virtualwire-518', 'logicalSwitchName': '55555555-4444-3333-2222-111111111111', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': { 'addressGroups': [ {'primaryAddress': '10.99.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5045, 'subInterfaceBackingType': 'NETWORK'} BAD_INTERFACE = { 'label': 'vNic_8', 'name': 'vnic8', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 8, 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True} class ErrorDhcpEdgeTestCaseReadOnly(base.BaseTestCase): def _is_readonly(self): return True def setUp(self): def get_plugin_mock(alias=constants.CORE): if alias in (constants.CORE, constants.L3): return self.plugin super(ErrorDhcpEdgeTestCaseReadOnly, self).setUp() self.plugin = mock.Mock() self.context = mock.Mock() self.context.session = mock.Mock() mock.patch('neutron_lib.plugins.directory.get_plugin', side_effect=get_plugin_mock).start() self.plugin.edge_manager = mock.Mock() self.plugin.nsx_v = mock.Mock() self.plugin.nsx_v.vcns = mock.Mock() mock.patch.object(self.plugin, 'get_availability_zone_name_by_edge', return_value='default').start() self.log = mock.Mock() error_dhcp_edge.LOG = self.log self.job = error_dhcp_edge.ErrorDhcpEdgeJob(self._is_readonly()) def test_clean_run(self): mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=[]).start() self.job.run(self.context) self.log.warning.assert_not_called() 
def test_invalid_router_binding(self): router_binds = copy.deepcopy(FAKE_ROUTER_BINDINGS) router_binds.append(BAD_ROUTER_BINDING) mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=router_binds).start() mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', return_value=FAKE_EDGE_VNIC_BINDS).start() mock.patch.object(self.plugin, 'get_networks', return_value=FAKE_NETWORK_RESULTS).start() mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', return_value=(None, BACKEND_EDGE_VNICS)).start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', return_value=FAKE_INTERNAL_NETWORKS).start() self.job.run(self.context) self.log.warning.assert_called_once() def test_invalid_edge_vnic_bindings(self): def fake_vnic_bind(*args, **kwargs): # The DB content is manipulated by the housekeeper. Therefore # get_edge_vnic_bindings_by_edge() output should be altered if fake_vnic_bind.ctr < 2: ret = fake_vnic_bind.vnic_binds else: ret = FAKE_EDGE_VNIC_BINDS fake_vnic_bind.ctr += 1 return ret fake_vnic_bind.ctr = 0 fake_vnic_bind.vnic_binds = copy.deepcopy(FAKE_EDGE_VNIC_BINDS) fake_vnic_bind.vnic_binds.append(BAD_VNIC_BINDING) mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=FAKE_ROUTER_BINDINGS).start() mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', side_effect=fake_vnic_bind).start() mock.patch.object(self.plugin, 'get_networks', return_value=FAKE_NETWORK_RESULTS).start() mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', return_value=(None, BACKEND_EDGE_VNICS)).start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', return_value=FAKE_INTERNAL_NETWORKS).start() self.job.run(self.context) self.log.warning.assert_called_once() def test_invalid_edge_sub_if(self): backend_vnics = copy.deepcopy(BACKEND_EDGE_VNICS) backend_vnics['vnics'][8]['subInterfaces']['subInterfaces'].append( BAD_SUBINTERFACE) mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', 
return_value=FAKE_ROUTER_BINDINGS).start() mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', return_value=FAKE_EDGE_VNIC_BINDS).start() mock.patch.object(self.plugin, 'get_networks', return_value=FAKE_NETWORK_RESULTS).start() mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', return_value=(None, backend_vnics)).start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', return_value=FAKE_INTERNAL_NETWORKS).start() self.job.run(self.context) self.log.warning.assert_called_once() def test_missing_edge_sub_if(self): backend_vnics = copy.deepcopy(BACKEND_EDGE_VNICS) del backend_vnics['vnics'][8]['subInterfaces']['subInterfaces'][1] mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=FAKE_ROUTER_BINDINGS).start() mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', return_value=FAKE_EDGE_VNIC_BINDS).start() mock.patch.object(self.plugin, 'get_networks', return_value=FAKE_NETWORK_RESULTS).start() mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', return_value=(None, backend_vnics)).start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', return_value=FAKE_INTERNAL_NETWORKS).start() self.job.run(self.context) self.log.warning.assert_called_once() def test_missing_edge_interface(self): backend_vnics = copy.deepcopy(BACKEND_EDGE_VNICS) backend_vnics['vnics'][8] = BAD_INTERFACE mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=FAKE_ROUTER_BINDINGS).start() mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', return_value=FAKE_EDGE_VNIC_BINDS).start() mock.patch.object(self.plugin, 'get_networks', return_value=FAKE_NETWORK_RESULTS).start() mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', return_value=(None, backend_vnics)).start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', return_value=FAKE_INTERNAL_NETWORKS).start() self.job.run(self.context) self.assertEqual(2, self.log.warning.call_count) class 
ErrorDhcpEdgeTestCaseReadWrite(ErrorDhcpEdgeTestCaseReadOnly): def _is_readonly(self): return False def test_invalid_router_binding(self): del_binding = mock.patch( 'vmware_nsx.db.nsxv_db.delete_nsxv_router_binding').start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', return_value=FAKE_ROUTER_BINDINGS).start() upd_binding = mock.patch( 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() super(ErrorDhcpEdgeTestCaseReadWrite, self ).test_invalid_router_binding() del_binding.assert_called_with(mock.ANY, BAD_ROUTER_BINDING['router_id']) upd_binding.assert_has_calls( [mock.call(mock.ANY, r['router_id'], status='ACTIVE') for r in FAKE_ROUTER_BINDINGS]) def test_invalid_edge_vnic_bindings(self): del_binding = mock.patch( 'vmware_nsx.db.nsxv_db.free_edge_vnic_by_network').start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', return_value=FAKE_ROUTER_BINDINGS).start() upd_binding = mock.patch( 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() super(ErrorDhcpEdgeTestCaseReadWrite, self ).test_invalid_edge_vnic_bindings() del_binding.assert_called_with(mock.ANY, BAD_VNIC_BINDING['edge_id'], BAD_VNIC_BINDING['network_id']) upd_binding.assert_has_calls( [mock.call(mock.ANY, r['router_id'], status='ACTIVE') for r in FAKE_ROUTER_BINDINGS]) def test_invalid_edge_sub_if(self): mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', return_value=FAKE_ROUTER_BINDINGS).start() upd_binding = mock.patch( 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() upd_if = mock.patch.object(self.plugin.nsx_v.vcns, 'update_interface').start() super(ErrorDhcpEdgeTestCaseReadWrite, self ).test_invalid_edge_sub_if() upd_binding.assert_has_calls( [mock.call(mock.ANY, r['router_id'], status='ACTIVE') for r in FAKE_ROUTER_BINDINGS]) upd_if.assert_called_with('edge-752', BACKEND_EDGE_VNICS['vnics'][8]) def test_missing_edge_sub_if(self): deleted_sub_if = BACKEND_EDGE_VNICS['vnics'][8]['subInterfaces'][ 
'subInterfaces'][1] mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', return_value=FAKE_ROUTER_BINDINGS).start() mock.patch.object( self.plugin.edge_manager, '_create_sub_interface', return_value=('dvportgroup-1575', deleted_sub_if)).start() upd_binding = mock.patch( 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() upd_if = mock.patch.object(self.plugin.nsx_v.vcns, 'update_interface').start() super(ErrorDhcpEdgeTestCaseReadWrite, self ).test_missing_edge_sub_if() upd_binding.assert_has_calls( [mock.call(mock.ANY, r['router_id'], status='ACTIVE') for r in FAKE_ROUTER_BINDINGS]) upd_if.assert_called_with('edge-752', BACKEND_EDGE_VNICS['vnics'][8]) def test_missing_edge_interface(self): def fake_create_subif(*args, **kwargs): deleted_sub_if = BACKEND_EDGE_VNICS['vnics'][8]['subInterfaces'][ 'subInterfaces'][fake_create_subif.ctr] fake_create_subif.ctr += 1 return (BACKEND_EDGE_VNICS['vnics'][8]['portgroupId'], deleted_sub_if) fake_create_subif.ctr = 0 mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', return_value=FAKE_ROUTER_BINDINGS).start() mock.patch.object( self.plugin.edge_manager, '_create_sub_interface', side_effect=fake_create_subif).start() upd_binding = mock.patch( 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() upd_if = mock.patch.object(self.plugin.nsx_v.vcns, 'update_interface').start() super(ErrorDhcpEdgeTestCaseReadWrite, self ).test_missing_edge_interface() upd_binding.assert_has_calls( [mock.call(mock.ANY, r['router_id'], status='ACTIVE') for r in FAKE_ROUTER_BINDINGS]) upd_if.assert_called_with('edge-752', BACKEND_EDGE_VNICS['vnics'][8]) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_backup_edge.py0000666000175100017510000000614613244523345031005 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.tests import base from neutron_lib.plugins import constants from vmware_nsx.plugins.nsx_v.housekeeper import error_backup_edge FAKE_ROUTER_BINDINGS = [ { 'router_id': 'backup-3b0b1fe1-c984', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-782', 'edge_type': 'service', 'appliance_size': 'compact'}] class ErrorBackupEdgeTestCaseReadOnly(base.BaseTestCase): def _is_readonly(self): return True def setUp(self): def get_plugin_mock(alias=constants.CORE): if alias in (constants.CORE, constants.L3): return self.plugin super(ErrorBackupEdgeTestCaseReadOnly, self).setUp() self.plugin = mock.Mock() self.context = mock.Mock() self.context.session = mock.Mock() mock.patch('neutron_lib.plugins.directory.get_plugin', side_effect=get_plugin_mock).start() self.log = mock.Mock() error_backup_edge.LOG = self.log self.job = error_backup_edge.ErrorBackupEdgeJob(self._is_readonly()) def test_clean_run(self): mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=[]).start() self.job.run(self.context) self.log.warning.assert_not_called() def test_broken_backup_edge(self): mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=FAKE_ROUTER_BINDINGS).start() self.job.run(self.context) self.log.warning.assert_called_once() class ErrorBackupEdgeTestCaseReadWrite(ErrorBackupEdgeTestCaseReadOnly): def _is_readonly(self): return False def test_broken_backup_edge(self): upd_binding = mock.patch( 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() upd_edge = mock.patch.object(self.plugin.nsx_v, 
'update_edge').start() self.job.azs = mock.Mock() az = mock.Mock() mock.patch.object(self.job.azs, 'get_availability_zone', return_value=az).start() super(ErrorBackupEdgeTestCaseReadWrite, self ).test_broken_backup_edge() upd_binding.assert_has_calls( [mock.call(mock.ANY, r['router_id'], status='ACTIVE') for r in FAKE_ROUTER_BINDINGS]) upd_edge.assert_called_with( self.context, 'backup-3b0b1fe1-c984', 'edge-782', 'backup-3b0b1fe1-c984', None, appliance_size='compact', availability_zone=az, dist=False) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/housekeeper/__init__.py0000666000175100017510000000000013244523345026022 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/test_edge_loadbalancer_driver_v2.py0000666000175100017510000014517713244523345030411 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.services.flavors import flavors_plugin from neutron.tests import base from neutron_lbaas.services.loadbalancer import data_models as lb_models from neutron_lib import context from neutron_lib import exceptions as n_exc from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield import vcns_driver from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common LB_VIP = '10.0.0.10' LB_EDGE_ID = 'edge-x' LB_ID = 'xxx-xxx' LB_TENANT_ID = 'yyy-yyy' LB_VIP_FWR_ID = 'fwr-1' LB_BINDING = {'loadbalancer_id': LB_ID, 'edge_id': LB_EDGE_ID, 'edge_fw_rule_id': LB_VIP_FWR_ID, 'vip_address': LB_VIP} LISTENER_ID = 'xxx-111' EDGE_APP_PROFILE_ID = 'appp-x' EDGE_APP_PROF_DEF = {'sslPassthrough': False, 'insertXForwardedFor': False, 'serverSslEnabled': False, 'name': LISTENER_ID, 'template': 'http', 'persistence': { 'cookieMode': 'insert', 'cookieName': 'default_cookie_name', 'method': 'cookie'}} EDGE_VIP_ID = 'vip-aaa' EDGE_VIP_DEF = {'protocol': 'http', 'name': 'vip_' + LISTENER_ID, 'connectionLimit': 0, 'defaultPoolId': None, 'ipAddress': LB_VIP, 'port': 80, 'accelerationEnabled': False, 'applicationProfileId': EDGE_APP_PROFILE_ID, 'description': ''} LISTENER_BINDING = {'loadbalancer_id': LB_ID, 'listener_id': LISTENER_ID, 'app_profile_id': EDGE_APP_PROFILE_ID, 'vse_id': EDGE_VIP_ID} POOL_ID = 'ppp-qqq' EDGE_POOL_ID = 'pool-xx' EDGE_POOL_DEF = {'transparent': False, 'name': 'pool_' + POOL_ID, 'algorithm': 'round-robin', 'description': ''} POOL_BINDING = {'loadbalancer_id': LB_ID, 'pool_id': POOL_ID, 'edge_pool_id': EDGE_POOL_ID} MEMBER_ID = 'mmm-mmm' MEMBER_ADDRESS = '10.0.0.200' EDGE_MEMBER_DEF = {'monitorPort': 80, 'name': 'member-' + MEMBER_ID, 'weight': 1, 'ipAddress': MEMBER_ADDRESS, 'port': 80, 'condition': 'disabled'} POOL_FW_SECT = '10001' HM_ID = 'hhh-mmm' EDGE_HM_ID = 'hm-xx' EDGE_HM_DEF = {'maxRetries': 1, 'interval': 3, 'type': 'icmp', 'name': HM_ID, 'timeout': 3} HM_BINDING = 
{'loadbalancer_id': LB_ID, 'pool_id': POOL_ID, 'hm_id': HM_ID, 'edge_id': LB_EDGE_ID, 'edge_mon_id': EDGE_HM_ID} L7POL_ID = 'l7pol-l7pol' EDGE_RULE_ID = 'app-rule-xx' L7POL_BINDING = {'policy_id': L7POL_ID, 'edge_id': LB_EDGE_ID, 'edge_app_rule_id': EDGE_RULE_ID} EDGE_L7POL_DEF = {'script': 'http-request deny if TRUE', 'name': 'pol_' + L7POL_ID} L7RULE_ID1 = 'l7rule-111' L7RULE_ID2 = 'l7rule-222' class BaseTestEdgeLbaasV2(base.BaseTestCase): def _tested_entity(self): return None def setUp(self): super(BaseTestEdgeLbaasV2, self).setUp() self.context = context.get_admin_context() callbacks = mock.Mock() callbacks.plugin = mock.Mock() self.edge_driver = vcns_driver.VcnsDriver(callbacks) self.lbv2_driver = mock.Mock() self.core_plugin = mock.Mock() self.flavor_plugin = flavors_plugin.FlavorsPlugin() base_mgr.LoadbalancerBaseManager._lbv2_driver = self.lbv2_driver base_mgr.LoadbalancerBaseManager._core_plugin = self.core_plugin base_mgr.LoadbalancerBaseManager._flavor_plugin = self.flavor_plugin self._patch_lb_plugin(self.lbv2_driver, self._tested_entity) self.lb = lb_models.LoadBalancer(LB_ID, LB_TENANT_ID, 'lb-name', '', 'some-subnet', 'port-id', LB_VIP) self.listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID, 'l-name', '', None, LB_ID, 'HTTP', protocol_port=80, loadbalancer=self.lb) self.sess_persist = lb_models.SessionPersistence(type='HTTP_COOKIE') self.pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool-name', '', None, 'HTTP', 'ROUND_ROBIN', loadbalancer_id=LB_ID, listener=self.listener, listeners=[self.listener], loadbalancer=self.lb, session_persistence=self.sess_persist) self.listener.default_pool = self.pool self.member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID, MEMBER_ADDRESS, 80, 1, pool=self.pool) self.hm = lb_models.HealthMonitor(HM_ID, LB_TENANT_ID, 'PING', 3, 3, 1, pool=self.pool) self.l7policy = lb_models.L7Policy(L7POL_ID, LB_TENANT_ID, name='policy-test', description='policy-desc', listener_id=LISTENER_ID, action='REJECT', 
listener=self.listener, position=1) self.l7rule1 = lb_models.L7Rule(L7RULE_ID1, LB_TENANT_ID, l7policy_id=L7POL_ID, compare_type='EQUAL_TO', invert=False, type='HEADER', key='key1', value='val1', policy=self.l7policy) self.l7rule2 = lb_models.L7Rule(L7RULE_ID2, LB_TENANT_ID, l7policy_id=L7POL_ID, compare_type='STARTS_WITH', invert=True, type='PATH', value='/images', policy=self.l7policy) def tearDown(self): self._unpatch_lb_plugin(self.lbv2_driver, self._tested_entity) super(BaseTestEdgeLbaasV2, self).tearDown() def _patch_lb_plugin(self, lb_plugin, manager): self.real_manager = getattr(lb_plugin, manager) lb_manager = mock.patch.object(lb_plugin, manager).start() mock.patch.object(lb_manager, 'create').start() mock.patch.object(lb_manager, 'update').start() mock.patch.object(lb_manager, 'delete').start() mock.patch.object(lb_manager, 'successful_completion').start() def _unpatch_lb_plugin(self, lb_plugin, manager): setattr(lb_plugin, manager, self.real_manager) class TestEdgeLbaasV2Loadbalancer(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2Loadbalancer, self).setUp() @property def _tested_entity(self): return 'load_balancer' def test_create(self): with mock.patch.object(lb_common, 'get_lbaas_edge_id' ) as mock_get_edge, \ mock.patch.object(lb_common, 'add_vip_fw_rule' ) as mock_add_vip_fwr, \ mock.patch.object(lb_common, 'set_lb_firewall_default_rule' ) as mock_set_fw_rule, \ mock.patch.object(lb_common, 'enable_edge_acceleration' ) as mock_enable_edge_acceleration, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding_by_edge' ) as mock_get_lb_binding_by_edge, \ mock.patch.object(nsxv_db, 'add_nsxv_lbaas_loadbalancer_binding' ) as mock_db_binding: mock_get_edge.return_value = LB_EDGE_ID mock_add_vip_fwr.return_value = LB_VIP_FWR_ID mock_get_lb_binding_by_edge.return_value = [] self.edge_driver.loadbalancer.create(self.context, self.lb) mock_add_vip_fwr.assert_called_with(self.edge_driver.vcns, LB_EDGE_ID, LB_ID, LB_VIP) 
mock_db_binding.assert_called_with(self.context.session, LB_ID, LB_EDGE_ID, LB_VIP_FWR_ID, LB_VIP) mock_set_fw_rule.assert_called_with( self.edge_driver.vcns, LB_EDGE_ID, 'accept') mock_get_edge.assert_called_with(mock.ANY, mock.ANY, LB_ID, LB_VIP, mock.ANY, LB_TENANT_ID, 'compact') mock_successful_completion = ( self.lbv2_driver.load_balancer.successful_completion) mock_successful_completion.assert_called_with(self.context, self.lb) mock_enable_edge_acceleration.assert_called_with( self.edge_driver.vcns, LB_EDGE_ID) def test_create_with_flavor(self): flavor_name = 'large' with mock.patch.object(lb_common, 'get_lbaas_edge_id' ) as mock_get_edge, \ mock.patch.object(lb_common, 'add_vip_fw_rule' ) as mock_add_vip_fwr, \ mock.patch.object(lb_common, 'set_lb_firewall_default_rule' ) as mock_set_fw_rule, \ mock.patch.object(lb_common, 'enable_edge_acceleration' ) as mock_enable_edge_acceleration, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding_by_edge' ) as mock_get_lb_binding_by_edge, \ mock.patch.object(nsxv_db, 'add_nsxv_lbaas_loadbalancer_binding' ) as mock_db_binding,\ mock.patch('neutron.services.flavors.flavors_plugin.FlavorsPlugin.' 
'get_flavor', return_value={'name': flavor_name}): mock_get_edge.return_value = LB_EDGE_ID mock_add_vip_fwr.return_value = LB_VIP_FWR_ID mock_get_lb_binding_by_edge.return_value = [] self.lb.flavor_id = 'dummy' self.edge_driver.loadbalancer.create(self.context, self.lb) mock_add_vip_fwr.assert_called_with(self.edge_driver.vcns, LB_EDGE_ID, LB_ID, LB_VIP) mock_db_binding.assert_called_with(self.context.session, LB_ID, LB_EDGE_ID, LB_VIP_FWR_ID, LB_VIP) mock_set_fw_rule.assert_called_with( self.edge_driver.vcns, LB_EDGE_ID, 'accept') mock_get_edge.assert_called_with( mock.ANY, mock.ANY, LB_ID, LB_VIP, mock.ANY, LB_TENANT_ID, flavor_name) mock_successful_completion = ( self.lbv2_driver.load_balancer.successful_completion) mock_successful_completion.assert_called_with(self.context, self.lb) mock_enable_edge_acceleration.assert_called_with( self.edge_driver.vcns, LB_EDGE_ID) self.lb.flavor_id = None def test_create_with_illegal_flavor(self): flavor_name = 'no_size' with mock.patch.object(lb_common, 'get_lbaas_edge_id' ) as mock_get_edge, \ mock.patch.object(lb_common, 'add_vip_fw_rule' ) as mock_add_vip_fwr, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding_by_edge' ) as mock_get_lb_binding_by_edge, \ mock.patch('neutron.services.flavors.flavors_plugin.FlavorsPlugin.' 
'get_flavor', return_value={'name': flavor_name}): mock_get_edge.return_value = LB_EDGE_ID mock_add_vip_fwr.return_value = LB_VIP_FWR_ID mock_get_lb_binding_by_edge.return_value = [] self.lb.flavor_id = 'dummy' self.assertRaises( n_exc.InvalidInput, self.edge_driver.loadbalancer.create, self.context, self.lb) self.lb.flavor_id = None def test_update(self): new_lb = lb_models.LoadBalancer(LB_ID, 'yyy-yyy', 'lb-name', 'heh-huh', 'some-subnet', 'port-id', LB_VIP) self.edge_driver.loadbalancer.update(self.context, self.lb, new_lb) mock_successful_completion = ( self.lbv2_driver.load_balancer.successful_completion) mock_successful_completion.assert_called_with(self.context, new_lb) def test_delete_old(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_binding, \ mock.patch.object(lb_common, 'del_vip_fw_rule') as mock_del_fwr, \ mock.patch.object(lb_common, 'del_vip_as_secondary_ip' ) as mock_vip_sec_ip, \ mock.patch.object(lb_common, 'set_lb_firewall_default_rule' ) as mock_set_fw_rule, \ mock.patch.object(nsxv_db, 'del_nsxv_lbaas_loadbalancer_binding', ) as mock_del_binding, \ mock.patch.object(self.core_plugin, 'get_ports' ) as mock_get_ports, \ mock.patch.object(self.core_plugin, 'get_router', return_value={'router_type': 'exclusive'}), \ mock.patch.object(nsxv_db, 'get_nsxv_router_binding_by_edge' ) as mock_get_r_binding: mock_get_binding.return_value = LB_BINDING mock_get_ports.return_value = [] mock_get_r_binding.return_value = {'router_id': 'xxxx'} self.edge_driver.loadbalancer.delete(self.context, self.lb) mock_del_fwr.assert_called_with(self.edge_driver.vcns, LB_EDGE_ID, LB_VIP_FWR_ID) mock_vip_sec_ip.assert_called_with(self.edge_driver.vcns, LB_EDGE_ID, LB_VIP) mock_del_binding.assert_called_with(self.context.session, LB_ID) mock_set_fw_rule.assert_called_with( self.edge_driver.vcns, LB_EDGE_ID, 'deny') mock_successful_completion = ( self.lbv2_driver.load_balancer.successful_completion) 
mock_successful_completion.assert_called_with(self.context, self.lb, delete=True) def test_delete_new(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_binding, \ mock.patch.object(lb_common, 'set_lb_firewall_default_rule' ) as mock_set_fw_rule, \ mock.patch.object(nsxv_db, 'del_nsxv_lbaas_loadbalancer_binding', ) as mock_del_binding, \ mock.patch.object(self.core_plugin, 'get_ports' ) as mock_get_ports, \ mock.patch.object(self.core_plugin.edge_manager, 'delete_lrouter' ) as mock_delete_lrouter, \ mock.patch.object(nsxv_db, 'get_nsxv_router_binding_by_edge' ) as mock_get_r_binding: mock_get_binding.return_value = LB_BINDING mock_get_ports.return_value = [] router_id = 'lbaas-xxxx' mock_get_r_binding.return_value = {'router_id': router_id} self.edge_driver.loadbalancer.delete(self.context, self.lb) mock_del_binding.assert_called_with(self.context.session, LB_ID) mock_set_fw_rule.assert_called_with( self.edge_driver.vcns, LB_EDGE_ID, 'deny') mock_delete_lrouter.assert_called_with( mock.ANY, 'lbaas-' + LB_ID, dist=False) mock_successful_completion = ( self.lbv2_driver.load_balancer.successful_completion) mock_successful_completion.assert_called_with(self.context, self.lb, delete=True) def test_stats(self): pass def test_refresh(self): pass class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2Listener, self).setUp() @property def _tested_entity(self): return 'listener' def test_create(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(self.edge_driver.vcns, 'create_app_profile' ) as mock_create_app_prof, \ mock.patch.object(self.edge_driver.vcns, 'create_vip' ) as mock_create_vip, \ mock.patch.object(nsxv_db, 'add_nsxv_lbaas_listener_binding' ) as mock_add_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding', return_value=None): mock_get_lb_binding.return_value = LB_BINDING 
mock_create_app_prof.return_value = ( {'location': 'x/' + EDGE_APP_PROFILE_ID}, None) mock_create_vip.return_value = ( {'location': 'x/' + EDGE_VIP_ID}, None) self.edge_driver.listener.create(self.context, self.listener) mock_create_app_prof.assert_called_with(LB_EDGE_ID, EDGE_APP_PROF_DEF) mock_create_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_DEF) mock_add_binding.assert_called_with( self.context.session, LB_ID, LISTENER_ID, EDGE_APP_PROFILE_ID, EDGE_VIP_ID) mock_successful_completion = ( self.lbv2_driver.listener.successful_completion) mock_successful_completion.assert_called_with(self.context, self.listener) def test_update(self): new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID, 'l-name', '', None, LB_ID, 'HTTP', protocol_port=8000, loadbalancer=self.lb) new_listener.default_pool = self.pool with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding', return_value=None), \ mock.patch.object(self.edge_driver.vcns, 'update_app_profile' ) as mock_upd_app_prof, \ mock.patch.object(self.edge_driver.vcns, 'update_vip' ) as mock_upd_vip: mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_lb_binding.return_value = LB_BINDING self.edge_driver.listener.update( self.context, self.listener, new_listener) mock_upd_app_prof.assert_called_with(LB_EDGE_ID, EDGE_APP_PROFILE_ID, EDGE_APP_PROF_DEF) edge_vip_def = EDGE_VIP_DEF.copy() edge_vip_def['port'] = 8000 mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, edge_vip_def) mock_successful_completion = ( self.lbv2_driver.listener.successful_completion) mock_successful_completion.assert_called_with(self.context, new_listener) def test_delete(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' 
) as mock_get_lb_binding, \ mock.patch.object(self.edge_driver.vcns, 'delete_vip' ) as mock_del_vip, \ mock.patch.object(self.edge_driver.vcns, 'delete_app_profile' ) as mock_del_app_prof, \ mock.patch.object(nsxv_db, 'del_nsxv_lbaas_listener_binding' ) as mock_del_binding: mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_lb_binding.return_value = LB_BINDING self.edge_driver.listener.delete(self.context, self.listener) mock_del_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID) mock_del_app_prof.assert_called_with(LB_EDGE_ID, EDGE_APP_PROFILE_ID) mock_del_binding.assert_called_with(self.context.session, LB_ID, LISTENER_ID) mock_successful_completion = ( self.lbv2_driver.listener.successful_completion) mock_successful_completion.assert_called_with(self.context, self.listener, delete=True) class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2Pool, self).setUp() @property def _tested_entity(self): return 'pool' def test_create(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(self.edge_driver.vcns, 'create_pool' ) as mock_create_pool, \ mock.patch.object(nsxv_db, 'add_nsxv_lbaas_pool_binding' ) as mock_add_binding, \ mock.patch.object(self.edge_driver.vcns, 'update_vip' ) as mock_upd_vip,\ mock.patch.object(self.edge_driver.vcns, 'update_app_profile' ) as mock_upd_app_prof: mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_lb_binding.return_value = LB_BINDING mock_create_pool.return_value = ( {'location': 'x/' + EDGE_POOL_ID}, None) self.edge_driver.pool.create(self.context, self.pool) mock_create_pool.assert_called_with(LB_EDGE_ID, EDGE_POOL_DEF.copy()) mock_add_binding.assert_called_with(self.context.session, LB_ID, POOL_ID, EDGE_POOL_ID) edge_vip_def = EDGE_VIP_DEF.copy() edge_vip_def['defaultPoolId'] = EDGE_POOL_ID 
mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, edge_vip_def) mock_upd_app_prof.assert_called_with(LB_EDGE_ID, EDGE_APP_PROFILE_ID, EDGE_APP_PROF_DEF) mock_successful_completion = ( self.lbv2_driver.pool.successful_completion) mock_successful_completion.assert_called_with(self.context, self.pool) def test_update(self): new_pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool-name', '', None, 'HTTP', 'LEAST_CONNECTIONS', listener=self.listener) list_bind = {'app_profile_id': EDGE_APP_PROFILE_ID} with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding,\ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding', return_value=list_bind),\ mock.patch.object(self.edge_driver.vcns, 'update_pool' ) as mock_upd_pool,\ mock.patch.object(self.edge_driver.vcns, 'get_pool' ) as mock_get_pool,\ mock.patch.object(self.edge_driver.vcns, 'update_app_profile' ) as mock_upd_app_prof: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING fake_edge = EDGE_POOL_DEF.copy() fake_edge['monitorId'] = 'monitor-7' fake_edge['member'] = ['member1', 'member2'] mock_get_pool.return_value = (None, fake_edge) self.edge_driver.pool.update(self.context, self.pool, new_pool) edge_pool_def = EDGE_POOL_DEF.copy() edge_pool_def['algorithm'] = 'leastconn' edge_pool_def['monitorId'] = 'monitor-7' edge_pool_def['member'] = ['member1', 'member2'] mock_upd_pool.assert_called_with( LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) mock_upd_app_prof.assert_called_with(LB_EDGE_ID, EDGE_APP_PROFILE_ID, EDGE_APP_PROF_DEF) mock_successful_completion = ( self.lbv2_driver.pool.successful_completion) mock_successful_completion.assert_called_with(self.context, new_pool) def test_delete(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as 
mock_get_pool_binding,\ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(self.edge_driver.vcns, 'update_vip' ) as mock_upd_vip, \ mock.patch.object(self.edge_driver.vcns, 'delete_pool' ) as mock_del_pool, \ mock.patch.object(nsxv_db, 'del_nsxv_lbaas_pool_binding' ) as mock_del_binding,\ mock.patch.object(self.edge_driver.vcns, 'update_app_profile' ): mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_get_listener_binding.return_value = LISTENER_BINDING self.edge_driver.pool.delete(self.context, self.pool) mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, EDGE_VIP_DEF) mock_del_pool.assert_called_with(LB_EDGE_ID, EDGE_POOL_ID) mock_del_binding.assert_called_with( self.context.session, LB_ID, POOL_ID) mock_successful_completion = ( self.lbv2_driver.pool.successful_completion) mock_successful_completion.assert_called_with(self.context, self.pool, delete=True) class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2Member, self).setUp() @property def _tested_entity(self): return 'member' def test_create(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_router_binding_by_edge' ), \ mock.patch.object(self.edge_driver.vcns, 'get_pool' ) as mock_get_pool, \ mock.patch.object(self.edge_driver.vcns, 'update_pool' ) as mock_update_pool: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_get_pool.return_value = (None, EDGE_POOL_DEF.copy()) self.edge_driver.member.create(self.context, self.member) edge_pool_def = EDGE_POOL_DEF.copy() edge_pool_def['member'] = [EDGE_MEMBER_DEF] mock_update_pool.assert_called_with( LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) mock_successful_completion = ( 
self.lbv2_driver.member.successful_completion) mock_successful_completion.assert_called_with(self.context, self.member) def test_update(self): new_member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID, MEMBER_ADDRESS, 8000, 1, True, pool=self.pool) with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.edge_driver.vcns, 'get_pool' ) as mock_get_pool, \ mock.patch.object(self.edge_driver.vcns, 'update_pool' ) as mock_update_pool: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING edge_pool_def = EDGE_POOL_DEF.copy() edge_pool_def['member'] = [EDGE_MEMBER_DEF] mock_get_pool.return_value = (None, edge_pool_def) self.edge_driver.member.update(self.context, self.member, new_member) edge_member_def = EDGE_MEMBER_DEF.copy() edge_member_def['port'] = 8000 edge_member_def['monitorPort'] = 8000 edge_member_def['condition'] = 'enabled' edge_pool_def['member'] = [edge_member_def] mock_update_pool.assert_called_with( LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) mock_successful_completion = ( self.lbv2_driver.member.successful_completion) mock_successful_completion.assert_called_with(self.context, new_member) def test_delete(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.edge_driver.vcns, 'get_pool' ) as mock_get_pool, \ mock.patch.object(self.core_plugin, 'get_ports' ) as mock_get_ports, \ mock.patch.object(lb_common, 'delete_lb_interface' ) as mock_del_lb_iface, \ mock.patch.object(self.edge_driver.vcns, 'update_pool' ) as mock_update_pool: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING edge_pool_def = EDGE_POOL_DEF.copy() edge_pool_def['member'] = [EDGE_MEMBER_DEF] 
mock_get_pool.return_value = (None, edge_pool_def) mock_get_ports.return_value = [] self.edge_driver.member.delete(self.context, self.member) edge_pool_def['member'] = [] mock_update_pool.assert_called_with( LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) mock_del_lb_iface.assert_called_with( self.context, self.core_plugin, LB_ID, None) mock_successful_completion = ( self.lbv2_driver.member.successful_completion) mock_successful_completion.assert_called_with(self.context, self.member, delete=True) class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2HealthMonitor, self).setUp() @property def _tested_entity(self): return 'health_monitor' def test_create(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_monitor_binding' ) as mock_get_mon_binding, \ mock.patch.object(self.edge_driver.vcns, 'create_health_monitor' ) as mock_create_hm, \ mock.patch.object(nsxv_db, 'add_nsxv_lbaas_monitor_binding' ) as mock_add_hm_binding, \ mock.patch.object(self.edge_driver.vcns, 'get_pool' ) as mock_get_pool, \ mock.patch.object(self.edge_driver.vcns, 'update_pool' ) as mock_update_pool: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_get_mon_binding.return_value = None mock_create_hm.return_value = ( {'location': 'x/' + EDGE_HM_ID}, None) mock_get_pool.return_value = (None, EDGE_POOL_DEF.copy()) self.edge_driver.healthmonitor.create(self.context, self.hm) mock_create_hm.assert_called_with(LB_EDGE_ID, EDGE_HM_DEF) mock_add_hm_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, HM_ID, LB_EDGE_ID, EDGE_HM_ID) edge_pool_def = EDGE_POOL_DEF.copy() edge_pool_def['monitorId'] = [EDGE_HM_ID] mock_update_pool.assert_called_with( LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) mock_successful_completion = ( 
self.lbv2_driver.health_monitor.successful_completion) mock_successful_completion.assert_called_with(self.context, self.hm) def test_update(self): new_hm = lb_models.HealthMonitor(HM_ID, LB_TENANT_ID, 'PING', 3, 3, 3, pool=self.pool) with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_monitor_binding' ) as mock_get_mon_binding, \ mock.patch.object(self.edge_driver.vcns, 'update_health_monitor' ) as mock_upd_hm: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_get_mon_binding.return_value = HM_BINDING self.edge_driver.healthmonitor.update( self.context, self.hm, new_hm) edge_hm_def = EDGE_HM_DEF.copy() edge_hm_def['maxRetries'] = 3 mock_upd_hm.assert_called_with(LB_EDGE_ID, EDGE_HM_ID, edge_hm_def) mock_successful_completion = ( self.lbv2_driver.health_monitor.successful_completion) mock_successful_completion.assert_called_with(self.context, new_hm) def test_delete(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_monitor_binding' ) as mock_get_mon_binding, \ mock.patch.object(self.edge_driver.vcns, 'delete_health_monitor' ) as mock_del_hm, \ mock.patch.object(self.edge_driver.vcns, 'get_pool' ) as mock_get_pool, \ mock.patch.object(self.edge_driver.vcns, 'update_pool' ) as mock_update_pool, \ mock.patch.object(nsxv_db, 'del_nsxv_lbaas_monitor_binding' ) as mock_del_binding: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_get_mon_binding.return_value = HM_BINDING edge_pool_def = EDGE_POOL_DEF.copy() edge_pool_def['monitorId'] = [EDGE_HM_ID] mock_get_pool.return_value = (None, edge_pool_def) 
self.edge_driver.healthmonitor.delete( self.context, self.hm) mock_del_hm.assert_called_with(LB_EDGE_ID, EDGE_HM_ID) edge_pool_def['monitorId'] = [] mock_update_pool.assert_called_with( LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) mock_del_binding.assert_called_with(self.context.session, LB_ID, POOL_ID, HM_ID, LB_EDGE_ID) mock_successful_completion = ( self.lbv2_driver.health_monitor.successful_completion) mock_successful_completion.assert_called_with(self.context, self.hm, delete=True) class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2L7Policy, self).setUp() @property def _tested_entity(self): return 'l7policy' def test_create(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(nsxv_db, 'add_nsxv_lbaas_l7policy_binding' ) as mock_add_l7policy_binding,\ mock.patch.object(self.edge_driver.vcns, 'create_app_rule' ) as mock_create_rule, \ mock.patch.object(self.edge_driver.vcns, 'get_vip' ) as mock_get_vip, \ mock.patch.object(self.edge_driver.vcns, 'update_vip' ) as mock_upd_vip: mock_get_lb_binding.return_value = LB_BINDING mock_get_l7policy_binding.return_value = L7POL_BINDING mock_get_listener_binding.return_value = LISTENER_BINDING mock_create_rule.return_value = ( {'location': 'x/' + EDGE_RULE_ID}, None) mock_get_vip.return_value = (None, EDGE_VIP_DEF.copy()) self.edge_driver.l7policy.create(self.context, self.l7policy) mock_create_rule.assert_called_with(LB_EDGE_ID, EDGE_L7POL_DEF.copy()) mock_add_l7policy_binding.assert_called_with( self.context.session, L7POL_ID, LB_EDGE_ID, EDGE_RULE_ID) edge_vip_def = EDGE_VIP_DEF.copy() edge_vip_def['applicationRuleId'] = [EDGE_RULE_ID] mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, edge_vip_def) 
mock_successful_completion = ( self.lbv2_driver.l7policy.successful_completion) mock_successful_completion.assert_called_with(self.context, self.l7policy) def test_update(self): url = 'http://www.test.com' new_pol = lb_models.L7Policy(L7POL_ID, LB_TENANT_ID, name='policy-test', description='policy-desc', listener_id=LISTENER_ID, action='REDIRECT_TO_URL', redirect_url=url, listener=self.listener, position=2) with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(self.edge_driver.vcns, 'get_vip' ) as mock_get_vip, \ mock.patch.object(self.edge_driver.vcns, 'update_vip' ) as mock_upd_vip, \ mock.patch.object(self.edge_driver.vcns, 'update_app_rule' ) as mock_update_rule: mock_get_lb_binding.return_value = LB_BINDING mock_get_l7policy_binding.return_value = L7POL_BINDING mock_get_listener_binding.return_value = LISTENER_BINDING edge_vip_def = EDGE_VIP_DEF.copy() edge_vip_def['applicationRuleId'] = [EDGE_RULE_ID] mock_get_vip.return_value = (None, edge_vip_def) self.edge_driver.l7policy.update(self.context, self.l7policy, new_pol) edge_rule_def = EDGE_L7POL_DEF.copy() edge_rule_def['script'] = "redirect location %s if TRUE" % url mock_update_rule.assert_called_with( LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) mock_upd_vip.assert_called() mock_successful_completion = ( self.lbv2_driver.l7policy.successful_completion) mock_successful_completion.assert_called_with(self.context, new_pol) def test_delete(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsxv_db, 'del_nsxv_lbaas_l7policy_binding' ) as mock_del_l7policy_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 
'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding,\ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(self.edge_driver.vcns, 'delete_app_rule' ) as mock_del_app_rule, \ mock.patch.object(self.edge_driver.vcns, 'get_vip' ) as mock_get_vip, \ mock.patch.object(self.edge_driver.vcns, 'update_vip' ) as mock_upd_vip: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_l7policy_binding.return_value = L7POL_BINDING edge_vip_def = EDGE_VIP_DEF.copy() edge_vip_def['applicationRuleId'] = [EDGE_RULE_ID] mock_get_vip.return_value = (None, edge_vip_def) self.edge_driver.l7policy.delete(self.context, self.l7policy) edge_vip_def2 = EDGE_VIP_DEF.copy() edge_vip_def2['applicationRuleId'] = [] mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, edge_vip_def2) mock_del_app_rule.assert_called_with(LB_EDGE_ID, EDGE_RULE_ID) mock_del_l7policy_binding.assert_called_with( self.context.session, L7POL_ID) mock_successful_completion = ( self.lbv2_driver.l7policy.successful_completion) mock_successful_completion.assert_called_with(self.context, self.l7policy, delete=True) class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2L7Rule, self).setUp() @property def _tested_entity(self): return 'l7rule' def test_create(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(self.edge_driver.vcns, 'update_app_rule' ) as mock_update_rule: mock_get_l7policy_binding.return_value = L7POL_BINDING # Create the first rule self.l7rule1.policy.rules = [self.l7rule1] self.edge_driver.l7rule.create(self.context, self.l7rule1) edge_rule_def = EDGE_L7POL_DEF.copy() edge_rule_def['script'] = ( "acl %(rule_id)s hdr(key1) -i val1\n" "http-request deny if %(rule_id)s" % {'rule_id': L7RULE_ID1}) mock_update_rule.assert_called_with( 
LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) mock_successful_completion = ( self.lbv2_driver.l7rule.successful_completion) mock_successful_completion.assert_called_with( self.context, self.l7rule1, delete=False) # Create the 2nd rule self.l7rule2.policy.rules = [self.l7rule1, self.l7rule2] self.edge_driver.l7rule.create(self.context, self.l7rule2) edge_rule_def = EDGE_L7POL_DEF.copy() edge_rule_def['script'] = ( "acl %(rule_id1)s hdr(key1) -i val1\n" "acl %(rule_id2)s path_beg -i /images\n" "http-request deny if %(rule_id1)s !%(rule_id2)s" % {'rule_id1': L7RULE_ID1, 'rule_id2': L7RULE_ID2}) mock_update_rule.assert_called_with( LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) mock_successful_completion = ( self.lbv2_driver.l7rule.successful_completion) mock_successful_completion.assert_called_with( self.context, self.l7rule2, delete=False) def test_update(self): new_rule = lb_models.L7Rule(L7RULE_ID1, LB_TENANT_ID, l7policy_id=L7POL_ID, compare_type='EQUAL_TO', invert=False, type='HEADER', key='key2', value='val1', policy=self.l7policy) with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(self.edge_driver.vcns, 'update_app_rule' ) as mock_update_rule: mock_get_l7policy_binding.return_value = L7POL_BINDING new_rule.policy.rules = [new_rule] self.edge_driver.l7rule.update( self.context, self.l7rule1, new_rule) edge_rule_def = EDGE_L7POL_DEF.copy() edge_rule_def['script'] = ( "acl %(rule_id)s hdr(key2) -i val1\n" "http-request deny if %(rule_id)s" % {'rule_id': L7RULE_ID1}) mock_update_rule.assert_called_with( LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) mock_successful_completion = ( self.lbv2_driver.l7rule.successful_completion) mock_successful_completion.assert_called_with( self.context, new_rule, delete=False) def test_delete(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(self.edge_driver.vcns, 'update_app_rule' ) as mock_update_rule: 
mock_get_l7policy_binding.return_value = L7POL_BINDING self.l7rule1.policy.rules = [] self.edge_driver.l7rule.delete(self.context, self.l7rule1) edge_rule_def = EDGE_L7POL_DEF.copy() edge_rule_def['script'] = ( "http-request deny if TRUE") mock_update_rule.assert_called_with( LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) mock_successful_completion = ( self.lbv2_driver.l7rule.successful_completion) mock_successful_completion.assert_called_with( self.context, self.l7rule1, delete=True) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/test_fwaas_driver.py0000666000175100017510000002441713244523345025501 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock from neutron_lib.exceptions import firewall_v1 as exceptions from oslo_utils import uuidutils from vmware_nsx.services.fwaas.nsx_v import edge_fwaas_driver from vmware_nsx.tests.unit.nsx_v import test_plugin as test_v_plugin FAKE_FW_ID = 'fake_fw_uuid' class NsxvFwaasTestCase(test_v_plugin.NsxVPluginV2TestCase): def setUp(self): super(NsxvFwaasTestCase, self).setUp() self.firewall = edge_fwaas_driver.EdgeFwaasDriver() def _fake_rules_v4(self): rule1 = {'enabled': True, 'action': 'allow', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '80', 'source_port': '1-65535', 'source_ip_address': '10.24.4.2', 'id': 'fake-fw-rule1'} rule2 = {'enabled': True, 'action': 'deny', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '22', 'id': 'fake-fw-rule2'} rule3 = {'enabled': True, 'action': 'reject', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '23', 'id': 'fake-fw-rule3'} return [rule1, rule2, rule3] def _fake_backend_rules_v4(self, logged=False): rule1 = {'enabled': True, 'action': 'allow', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '80', 'source_port': '1-65535', 'source_ip_address': ['10.24.4.2'], 'position': '0', 'id': 'fake-fw-rule1', 'name': 'Fwaas-fake-fw-rule1'} rule2 = {'enabled': True, 'action': 'deny', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '22', 'id': 'fake-fw-rule2', 'position': '1', 'name': 'Fwaas-fake-fw-rule2'} rule3 = {'enabled': True, 'action': 'reject', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '23', 'position': '2', 'id': 'fake-fw-rule3', 'name': 'Fwaas-fake-fw-rule3'} if logged: for rule in (rule1, rule2, rule3): rule['loggingEnabled'] = logged return [rule1, rule2, rule3] def _fake_firewall_no_rule(self): rule_list = [] fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': True, 'tenant_id': 'tenant-uuid', 'firewall_rule_list': rule_list} return fw_inst def _fake_firewall(self, rule_list): _rule_list = copy.deepcopy(rule_list) for rule in _rule_list: rule['position'] = 
str(_rule_list.index(rule)) fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': True, 'tenant_id': 'tenant-uuid', 'firewall_rule_list': _rule_list} return fw_inst def _fake_firewall_with_admin_down(self, rule_list): fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': False, 'tenant_id': 'tenant-uuid', 'firewall_rule_list': rule_list} return fw_inst def _fake_apply_list(self, router_count=1): apply_list = [] while router_count > 0: rtr_id = uuidutils.generate_uuid() router_inst = {'id': rtr_id} router_info_inst = mock.Mock() router_info_inst.router = router_inst router_info_inst.router_id = rtr_id apply_list.append(router_info_inst) router_count -= 1 return apply_list def _get_fake_mapping(self, apply_list): router_edge_map = {} for router_info in apply_list: router_edge_map[router_info.router_id] = { 'edge_id': 'edge-1', 'lookup_id': router_info.router_id} return router_edge_map def _setup_firewall_with_rules(self, func, router_count=1): apply_list = self._fake_apply_list(router_count=router_count) rule_list = self._fake_rules_v4() firewall = self._fake_firewall(rule_list) edges = self._get_fake_mapping(apply_list) with mock.patch("vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2." "update_router_firewall") as update_fw,\ mock.patch("vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2." "_get_router"),\ mock.patch.object(self.firewall, "_get_routers_edges", return_value=edges): func('nsx', apply_list, firewall) self.assertEqual(router_count, update_fw.call_count) # Validate the args of the last call self.assertEqual(apply_list[-1].router_id, update_fw.call_args[0][1]) backend_rules = update_fw.call_args[1]['fwaas_rules'] self.assertEqual(len(rule_list), len(backend_rules)) self.assertEqual(self._fake_backend_rules_v4(), backend_rules) def test_create_firewall_no_rules(self): apply_list = self._fake_apply_list() firewall = self._fake_firewall_no_rule() edges = self._get_fake_mapping(apply_list) with mock.patch("vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2." 
"update_router_firewall") as update_fw,\ mock.patch("vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2." "_get_router"),\ mock.patch.object(self.firewall, "_get_routers_edges", return_value=edges): self.firewall.create_firewall('nsx', apply_list, firewall) self.assertEqual(1, update_fw.call_count) # Validate the args of the last call self.assertEqual(apply_list[0].router_id, update_fw.call_args[0][1]) backend_rules = update_fw.call_args[1]['fwaas_rules'] self.assertEqual([], backend_rules) def test_create_firewall_with_rules(self): self._setup_firewall_with_rules(self.firewall.create_firewall) def test_create_firewall_with_rules_two_routers(self): self._setup_firewall_with_rules(self.firewall.create_firewall, router_count=2) def test_update_firewall_with_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall) def test_delete_firewall(self): apply_list = self._fake_apply_list() firewall = self._fake_firewall_no_rule() edges = self._get_fake_mapping(apply_list) with mock.patch("vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2." "update_router_firewall") as update_fw,\ mock.patch("vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2." "_get_router"),\ mock.patch.object(self.firewall, "_get_routers_edges", return_value=edges): self.firewall.delete_firewall('nsx', apply_list, firewall) self.assertEqual(1, update_fw.call_count) # Validate the args of the last call self.assertEqual(apply_list[0].router_id, update_fw.call_args[0][1]) backend_rules = update_fw.call_args[1]['fwaas_rules'] self.assertIsNone(backend_rules) def test_create_firewall_with_admin_down(self): apply_list = self._fake_apply_list() rule_list = self._fake_rules_v4() firewall = self._fake_firewall_with_admin_down(rule_list) edges = self._get_fake_mapping(apply_list) with mock.patch("vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2." "update_router_firewall") as update_fw,\ mock.patch("vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2." 
"_get_router"),\ mock.patch.object(self.firewall, "_get_routers_edges", return_value=edges): self.firewall.create_firewall('nsx', apply_list, firewall) self.assertEqual(1, update_fw.call_count) # Validate the args of the last call self.assertEqual(apply_list[0].router_id, update_fw.call_args[0][1]) backend_rules = update_fw.call_args[1]['fwaas_rules'] self.assertEqual([], backend_rules) def test_should_apply_firewall_to_router(self): router = {'id': 'fake_id', 'external_gateway_info': 'fake_data', 'router_type': 'exclusive', 'distributed': False} self.assertTrue(self.firewall.should_apply_firewall_to_router(router)) # no external gateway: router['external_gateway_info'] = None self.assertFalse(self.firewall.should_apply_firewall_to_router(router)) router['external_gateway_info'] = 'Dummy' # not for shared router: router['router_type'] = 'shared' router['distributed'] = False self.assertRaises(exceptions.FirewallInternalDriverError, self.firewall.should_apply_firewall_to_router, router) # should work for distributed router router['router_type'] = 'exclusive' router['distributed'] = True self.assertTrue(self.firewall.should_apply_firewall_to_router(router)) # not for mdproxy router: router['name'] = 'metadata_proxy_router' self.assertRaises(exceptions.FirewallInternalDriverError, self.firewall.should_apply_firewall_to_router, router) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/__init__.py0000666000175100017510000000000013244523345023503 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/test_availability_zones.py0000666000175100017510000003011613244523345026706 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron.tests import base from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az DEF_AZ_POOL = ['service:compact:1:2', 'vdr:compact:1:2'] DEF_GLOBAL_POOL = ['service:compact:4:10', 'vdr:compact:4:10'] class NsxvAvailabilityZonesTestCase(base.BaseTestCase): def setUp(self): super(NsxvAvailabilityZonesTestCase, self).setUp() self.az_name = 'zone1' self.group_name = 'az:%s' % self.az_name config.register_nsxv_azs(cfg.CONF, [self.az_name]) cfg.CONF.set_override("ha_placement_random", True, group="nsxv") cfg.CONF.set_override("mgt_net_proxy_ips", ["2.2.2.2"], group="nsxv") cfg.CONF.set_override("dvs_id", "dvs-1", group="nsxv") def _config_az(self, resource_pool_id="respool", datastore_id="datastore", edge_ha=True, ha_datastore_id="hastore", backup_edge_pool=DEF_AZ_POOL, ha_placement_random=False, datacenter_moid="datacenter", mgt_net_moid="portgroup-407", mgt_net_proxy_ips=["1.1.1.1"], mgt_net_proxy_netmask="255.255.255.0", mgt_net_default_gateway="2.2.2.2", external_network="network-17", vdn_scope_id="vdnscope-1", dvs_id="dvs-2"): cfg.CONF.set_override("resource_pool_id", resource_pool_id, group=self.group_name) cfg.CONF.set_override("datastore_id", datastore_id, group=self.group_name) if edge_ha is not None: cfg.CONF.set_override("edge_ha", edge_ha, group=self.group_name) cfg.CONF.set_override("ha_datastore_id", ha_datastore_id, group=self.group_name) if ha_placement_random is not None: cfg.CONF.set_override("ha_placement_random", 
ha_placement_random, group=self.group_name) if datacenter_moid is not None: cfg.CONF.set_override("datacenter_moid", datacenter_moid, group=self.group_name) if backup_edge_pool is not None: cfg.CONF.set_override("backup_edge_pool", backup_edge_pool, group=self.group_name) if mgt_net_moid is not None: cfg.CONF.set_override("mgt_net_moid", mgt_net_moid, group=self.group_name) if mgt_net_proxy_ips is not None: cfg.CONF.set_override("mgt_net_proxy_ips", mgt_net_proxy_ips, group=self.group_name) if mgt_net_proxy_netmask is not None: cfg.CONF.set_override("mgt_net_proxy_netmask", mgt_net_proxy_netmask, group=self.group_name) if mgt_net_default_gateway is not None: cfg.CONF.set_override("mgt_net_default_gateway", mgt_net_default_gateway, group=self.group_name) if external_network is not None: cfg.CONF.set_override("external_network", external_network, group=self.group_name) if vdn_scope_id is not None: cfg.CONF.set_override("vdn_scope_id", vdn_scope_id, group=self.group_name) if dvs_id is not None: cfg.CONF.set_override("dvs_id", dvs_id, group=self.group_name) def test_simple_availability_zone(self): self._config_az() az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertTrue(az.edge_ha) self.assertEqual("hastore", az.ha_datastore_id) self.assertFalse(az.ha_placement_random) self.assertEqual("datacenter", az.datacenter_moid) self.assertEqual(DEF_AZ_POOL, az.backup_edge_pool) self.assertEqual("portgroup-407", az.mgt_net_moid) self.assertEqual(["1.1.1.1"], az.mgt_net_proxy_ips) self.assertEqual("255.255.255.0", az.mgt_net_proxy_netmask) self.assertEqual("2.2.2.2", az.mgt_net_default_gateway) self.assertEqual("network-17", az.external_network) self.assertEqual("vdnscope-1", az.vdn_scope_id) self.assertEqual("dvs-2", az.dvs_id) self.assertTrue(az.az_metadata_support) def test_availability_zone_no_edge_ha(self): self._config_az(edge_ha=False) 
az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertFalse(az.edge_ha) self.assertIsNone(az.ha_datastore_id) self.assertFalse(az.ha_placement_random) def test_availability_zone_no_ha_datastore(self): self._config_az(ha_datastore_id=None) az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertTrue(az.edge_ha) self.assertIsNone(az.ha_datastore_id) self.assertFalse(az.ha_placement_random) def test_missing_group_section(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, "doesnt_exist") def test_availability_zone_missing_respool(self): self._config_az(resource_pool_id=None) self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, self.az_name) def test_availability_zone_missing_datastore(self): self._config_az(datastore_id=None) self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, self.az_name) def test_availability_zone_missing_edge_ha(self): self._config_az(edge_ha=None) az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertFalse(az.edge_ha) self.assertIsNone(az.ha_datastore_id) self.assertFalse(az.ha_placement_random) def test_availability_zone_missing_edge_placement(self): self._config_az(ha_placement_random=None) az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertTrue(az.edge_ha) self.assertEqual("hastore", az.ha_datastore_id) # ha_placement_random should have the global value self.assertTrue(az.ha_placement_random) def 
test_availability_zone_missing_backup_pool(self): self._config_az(backup_edge_pool=None) az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) # Should use the global configuration instead self.assertEqual(DEF_GLOBAL_POOL, az.backup_edge_pool) def test_availability_zone_missing_metadata(self): self._config_az(mgt_net_proxy_ips=None) az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertIsNone(az.mgt_net_moid) self.assertEqual([], az.mgt_net_proxy_ips) self.assertIsNone(az.mgt_net_proxy_netmask) self.assertIsNone(az.mgt_net_default_gateway) self.assertFalse(az.az_metadata_support) def test_availability_zone_same_metadata(self): self._config_az(mgt_net_proxy_ips=["2.2.2.2"]) self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, self.az_name) self._config_az(mgt_net_proxy_ips=["2.2.2.2", "3.3.3.3"]) self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, self.az_name) class NsxvAvailabilityZonesOldTestCase(base.BaseTestCase): """Test old way of configuring the availability zones using a one-line configuration instead of different dynamic sections """ def setUp(self): super(NsxvAvailabilityZonesOldTestCase, self).setUp() cfg.CONF.set_override("mgt_net_proxy_ips", ["2.2.2.2"], group="nsxv") cfg.CONF.set_override("dvs_id", "dvs-1", group="nsxv") def test_simple_availability_zone(self): az = nsx_az.NsxVAvailabilityZone( "name:respool:datastore:true:hastore") self.assertEqual("name", az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertTrue(az.edge_ha) self.assertEqual("hastore", az.ha_datastore_id) self.assertFalse(az.ha_placement_random) self.assertEqual(DEF_GLOBAL_POOL, az.backup_edge_pool) # should get the global configuration (which is empty now) self.assertIsNone(az.external_network) self.assertIsNone(az.vdn_scope_id) self.assertEqual("dvs-1", az.dvs_id) # no metadata per az support 
self.assertFalse(az.az_metadata_support) self.assertIsNone(az.mgt_net_moid) self.assertEqual([], az.mgt_net_proxy_ips) self.assertIsNone(az.mgt_net_proxy_netmask) self.assertIsNone(az.mgt_net_default_gateway) def test_availability_zone_without_ha_datastore(self): az = nsx_az.NsxVAvailabilityZone( "name:respool:datastore:true") self.assertEqual("name", az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertTrue(az.edge_ha) self.assertIsNone(az.ha_datastore_id) def test_availability_zone_without_edge_ha(self): az = nsx_az.NsxVAvailabilityZone( "name:respool:datastore:FALSE") self.assertEqual("name", az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertFalse(az.edge_ha) self.assertIsNone(az.ha_datastore_id) def test_availability_fail_long_name(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, "very-very-very-very-very-longest-name:respool:da:true:ha") def test_availability_fail_few_args(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, "name:respool") def test_availability_fail_many_args(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, "name:1:2:3:4:5:6") def test_availability_fail_bad_edge_ha(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, "name:respool:datastore:truex:hastore") def test_availability_fail_no_ha_datastore(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, "name:respool:datastore:false:hastore") vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/test_plugin.py0000666000175100017510000105236313244523345024325 0ustar zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import copy from eventlet import greenthread import mock import netaddr from neutron.extensions import address_scope from neutron.extensions import l3 from neutron.extensions import securitygroup as secgrp from neutron.plugins.common import utils from neutron.tests.unit import _test_extension_portbindings as test_bindings import neutron.tests.unit.db.test_allowedaddresspairs_db as test_addr_pair import neutron.tests.unit.db.test_db_base_plugin_v2 as test_plugin from neutron.tests.unit.extensions import base as extension from neutron.tests.unit.extensions import test_address_scope from neutron.tests.unit.extensions import test_extra_dhcp_opt as test_dhcpopts import neutron.tests.unit.extensions.test_l3 as test_l3_plugin import neutron.tests.unit.extensions.test_l3_ext_gw_mode as test_ext_gw_mode import neutron.tests.unit.extensions.test_portsecurity as test_psec import neutron.tests.unit.extensions.test_securitygroup as ext_sg from neutron.tests.unit import testlib_api from neutron_lib.api.definitions import allowedaddresspairs as addrp_apidef from neutron_lib.api.definitions import dvr as dvr_apidef from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import l3_ext_gw_mode as l3_egm_apidef from neutron_lib.api.definitions import l3_flavors as l3fav_apidef from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import portbindings from 
neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api.definitions import router_availability_zone as raz_apidef from neutron_lib.api import validators from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from neutron_lib.utils import helpers from neutron_lib.utils import net from oslo_config import cfg from oslo_utils import uuidutils import six import webob.exc from vmware_nsx._i18n import _ from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import nsx_constants from vmware_nsx.common import utils as c_utils from vmware_nsx.db import nsxv_db from vmware_nsx.dvs import dvs from vmware_nsx.dvs import dvs_utils from vmware_nsx.extensions import projectpluginmap from vmware_nsx.extensions import routersize as router_size from vmware_nsx.extensions import routertype as router_type from vmware_nsx.extensions import vnicindex as ext_vnic_idx from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.drivers import ( distributed_router_driver as dist_router_driver) from vmware_nsx.plugins.nsx_v.drivers import ( exclusive_router_driver as ex_router_driver) from vmware_nsx.plugins.nsx_v.drivers import ( shared_router_driver as router_driver) from vmware_nsx.plugins.nsx_v import md_proxy from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_const from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vcns_exc from vmware_nsx.plugins.nsx_v.vshield import edge_appliance_driver from vmware_nsx.plugins.nsx_v.vshield import edge_firewall_driver from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.services.qos.nsx_v import utils as qos_utils from vmware_nsx.tests import unit as vmware from 
vmware_nsx.tests.unit.extensions import test_vnic_index from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns from vmware_nsx.tests.unit import test_utils PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin' _uuid = uuidutils.generate_uuid def set_az_in_config(name, resource_pool_id="respool-7", datastore_id="datastore-7", edge_ha=False, ha_datastore_id=None): group_name = 'az:%s' % name cfg.CONF.set_override('availability_zones', [name], group="nsxv") config.register_nsxv_azs(cfg.CONF, [name]) cfg.CONF.set_override("resource_pool_id", resource_pool_id, group=group_name) cfg.CONF.set_override("datastore_id", datastore_id, group=group_name) cfg.CONF.set_override("edge_ha", edge_ha, group=group_name) cfg.CONF.set_override("ha_datastore_id", ha_datastore_id, group=group_name) class NsxVPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): def _create_network(self, fmt, name, admin_state_up, arg_list=None, providernet_args=None, set_context=False, tenant_id=None, **kwargs): tenant_id = tenant_id or self._tenant_id data = {'network': {'name': name, 'admin_state_up': admin_state_up, 'tenant_id': tenant_id}} # Fix to allow the router:external attribute and any other # attributes containing a colon to be passed with # a double underscore instead kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items()) if extnet_apidef.EXTERNAL in kwargs: arg_list = (extnet_apidef.EXTERNAL, ) + (arg_list or ()) attrs = kwargs if providernet_args: attrs.update(providernet_args) for arg in (('admin_state_up', 'tenant_id', 'shared') + (arg_list or ())): # Arg must be present and not empty if arg in kwargs: data['network'][arg] = kwargs[arg] network_req = self.new_create_request('networks', data, fmt) if set_context and tenant_id: # create a specific auth context for this request network_req.environ['neutron.context'] = context.Context( '', tenant_id) return network_req.get_response(self.api) @contextlib.contextmanager def subnet(self, network=None, **kwargs): # Override the subnet 
method to automatically disable dhcp on external # subnets or ipv6 subnets, unless specified. set_context = kwargs.get('set_context', False) with test_plugin.optional_ctx( network, self.network, set_context=set_context, tenant_id=kwargs.get('tenant_id')) as network_to_use: if 'enable_dhcp' not in kwargs: if kwargs.get('ip_version') == 6: kwargs['enable_dhcp'] = False else: # Read the network itself, as the network in the args # does not content this value net = self._show('networks', network_to_use['network']['id']) if net['network']['router:external']: kwargs['enable_dhcp'] = False subnet = self._make_subnet(self.fmt, network_to_use, kwargs.get( 'gateway_ip', constants.ATTR_NOT_SPECIFIED), kwargs.get('cidr', '10.0.0.0/24'), kwargs.get('subnetpool_id'), kwargs.get('allocation_pools'), kwargs.get('ip_version', 4), kwargs.get('enable_dhcp', True), kwargs.get('dns_nameservers'), kwargs.get('host_routes'), segment_id=kwargs.get('segment_id'), shared=kwargs.get('shared'), ipv6_ra_mode=kwargs.get('ipv6_ra_mode'), ipv6_address_mode=kwargs.get( 'ipv6_address_mode'), tenant_id=kwargs.get('tenant_id'), set_context=set_context) yield subnet @mock.patch.object(edge_utils.EdgeManager, '_deploy_edge') def setUp(self, mock_deploy_edge, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): test_utils.override_nsx_ini_test() mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) mock_vcns_instance = mock_vcns.start() self.fc2 = fake_vcns.FakeVcns() mock_vcns_instance.return_value = self.fc2 edge_utils.query_dhcp_service_config = mock.Mock(return_value=[]) self.mock_create_dhcp_service = mock.patch("%s.%s" % ( vmware.EDGE_MANAGE_NAME, 'create_dhcp_edge_service')) self.mock_create_dhcp_service.start() mock_update_dhcp_service = mock.patch("%s.%s" % ( vmware.EDGE_MANAGE_NAME, 'update_dhcp_edge_service')) mock_update_dhcp_service.start() mock_delete_dhcp_service = mock.patch("%s.%s" % ( vmware.EDGE_MANAGE_NAME, 'delete_dhcp_edge_service')) mock_delete_dhcp_service.start() 
mock_check_backup_edge_pools = mock.patch("%s.%s" % ( vmware.EDGE_MANAGE_NAME, '_check_backup_edge_pools')) mock_check_backup_edge_pools.start() mock_deploy_backup_edges_at_backend = mock.patch("%s.%s" % ( vmware.EDGE_MANAGE_NAME, '_deploy_backup_edges_at_backend')) mock_deploy_backup_edges_at_backend.start() mock_process_security_group_logging = mock.patch( 'vmware_nsx.plugin.NsxVPlugin.' '_process_security_groups_rules_logging') mock_process_security_group_logging.start() self.default_res_pool = 'respool-28' cfg.CONF.set_override("resource_pool_id", self.default_res_pool, group="nsxv") set_az_in_config('az7') if service_plugins is not None: # override the service plugins only if specified directly super(NsxVPluginV2TestCase, self).setUp( plugin=plugin, service_plugins=service_plugins, ext_mgr=ext_mgr) else: super(NsxVPluginV2TestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr) self.addCleanup(self.fc2.reset_all) plugin_instance = directory.get_plugin() # handle TVD plugin case if plugin_instance.is_tvd_plugin(): plugin_instance = plugin_instance.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) plugin_instance.real_get_edge = plugin_instance._get_edge_id_by_rtr_id plugin_instance._get_edge_id_by_rtr_id = mock.Mock() plugin_instance._get_edge_id_by_rtr_id.return_value = False plugin_instance._get_edge_id_and_az_by_rtr_id = mock.Mock() plugin_instance._get_edge_id_and_az_by_rtr_id.return_value = ( False, False) # call init_complete manually. 
The event is not called in unit tests plugin_instance.init_complete(None, None, {}) def _get_core_plugin_with_dvs(self): # enable dvs features to allow policy with QOS cfg.CONF.set_default('use_dvs_features', True, 'nsxv') plugin = directory.get_plugin() with mock.patch.object(dvs_utils, 'dvs_create_session'): plugin._vcm = dvs.VCManager() return plugin class TestNetworksV2(test_plugin.TestNetworksV2, NsxVPluginV2TestCase): def _test_create_bridge_network(self, vlan_id=0): net_type = vlan_id and 'vlan' or 'flat' name = 'bridge_net' expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, net_type), (pnet.PHYSICAL_NETWORK, 'tzuuid'), (pnet.SEGMENTATION_ID, vlan_id)] providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: 'tzuuid'} if vlan_id: providernet_args[pnet.SEGMENTATION_ID] = vlan_id with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_bridge_network(self): self._test_create_bridge_network() def test_create_bridge_vlan_network(self): self._test_create_bridge_network(vlan_id=123) def test_get_vlan_network_name(self): p = directory.get_plugin() net_id = uuidutils.generate_uuid() dvs_id = 'dvs-10' net = {'name': '', 'id': net_id} # Empty net['name'] should yield dvs_id-net_id as a name for the # port group. expected = '%s-%s' % (dvs_id, net_id) self.assertEqual(expected, p._get_vlan_network_name(net, dvs_id)) # If network name is provided then it should yield # dvs_id-net_name-net_id as a name for the port group. 
net = {'name': 'pele', 'id': net_id} expected = '%s-%s-%s' % (dvs_id, 'pele', net_id) self.assertEqual(expected, p._get_vlan_network_name(net, dvs_id)) name = 'X' * 500 net = {'name': name, 'id': net_id} expected = '%s-%s-%s' % (dvs_id, name[:36], net_id) self.assertEqual(expected, p._get_vlan_network_name(net, dvs_id)) def test_get_vlan_network_name_with_net_name_missing(self): p = directory.get_plugin() net_id = uuidutils.generate_uuid() dvs_id = 'dvs-10' net = {'id': net_id} # Missing net['name'] should yield dvs_id-net_id as a name for the # port group. expected = '%s-%s' % (dvs_id, net_id) self.assertEqual(expected, p._get_vlan_network_name(net, dvs_id)) def _test_generate_tag(self, vlan_id): net_type = 'vlan' name = 'bridge_net' plugin = directory.get_plugin() plugin._network_vlans = utils.parse_network_vlan_ranges( cfg.CONF.nsxv.network_vlan_ranges) expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, net_type), (pnet.PHYSICAL_NETWORK, 'dvs-70'), (pnet.SEGMENTATION_ID, vlan_id)] providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: 'dvs-70'} with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_bridge_vlan_generate(self): cfg.CONF.set_default('network_vlan_ranges', 'dvs-70', 'nsxv') self._test_generate_tag(1) def test_create_bridge_vlan_generate_range(self): cfg.CONF.set_default('network_vlan_ranges', 'dvs-70:100:110', 'nsxv') self._test_generate_tag(100) def test_create_bridge_vlan_network_outofrange_returns_400(self): with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_bridge_network(vlan_id=5000) self.assertEqual(ctx_manager.exception.code, 400) def test_create_external_portgroup_network(self): name = 'ext_net' expected = [('subnets', []), ('name', name), 
('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (extnet_apidef.EXTERNAL, True), (pnet.NETWORK_TYPE, 'portgroup'), (pnet.PHYSICAL_NETWORK, 'tzuuid')] providernet_args = {pnet.NETWORK_TYPE: 'portgroup', pnet.PHYSICAL_NETWORK: 'tzuuid', extnet_apidef.EXTERNAL: True} with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, extnet_apidef.EXTERNAL)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_portgroup_network(self): name = 'pg_net' expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, 'portgroup'), (pnet.PHYSICAL_NETWORK, 'tzuuid')] providernet_args = {pnet.NETWORK_TYPE: 'portgroup', pnet.PHYSICAL_NETWORK: 'tzuuid'} with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) # try to create another one on the same physical net will failure res = self._create_network( self.fmt, name, True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) data = self.deserialize(self.fmt, res) self.assertIn('NeutronError', data) def test_delete_network_after_removing_subnet(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' fmt = 'json' # Create new network res = self._create_network(fmt=fmt, name='net', admin_state_up=True) network = self.deserialize(fmt, res) subnet = self._make_subnet(fmt, network, gateway_ip, cidr, ip_version=4) req = self.new_delete_request('subnets', subnet['subnet']['id']) sub_del_res = req.get_response(self.api) self.assertEqual(sub_del_res.status_int, 204) req = self.new_delete_request('networks', network['network']['id']) net_del_res = req.get_response(self.api) self.assertEqual(net_del_res.status_int, 204) def test_list_networks_with_shared(self): with self.network(name='net1'): with 
self.network(name='net2', shared=True): req = self.new_list_request('networks') res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(len(res['networks']), 2) req_2 = self.new_list_request('networks') req_2.environ['neutron.context'] = context.Context('', 'somebody') res = self.deserialize('json', req_2.get_response(self.api)) # tenant must see a single network self.assertEqual(len(res['networks']), 1) def test_create_network_name_exceeds_40_chars(self): name = 'this_is_a_network_whose_name_is_longer_than_40_chars' with self.network(name=name) as net: # Assert neutron name is not truncated self.assertEqual(net['network']['name'], name) def test_update_network_with_admin_false(self): data = {'network': {'admin_state_up': False}} with self.network() as net: plugin = directory.get_plugin() self.assertRaises(NotImplementedError, plugin.update_network, context.get_admin_context(), net['network']['id'], data) def test_create_extend_dvs_provider_network(self): name = 'provider_net' expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, 'flat'), (pnet.PHYSICAL_NETWORK, 'dvs-uuid')] providernet_args = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'dvs-uuid'} with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_same_vlan_network_with_different_dvs(self): name = 'dvs-provider-net' expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, 'vlan'), (pnet.SEGMENTATION_ID, 43), (pnet.PHYSICAL_NETWORK, 'dvs-uuid-1')] providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 43, pnet.PHYSICAL_NETWORK: 'dvs-uuid-1'} with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, 
pnet.PHYSICAL_NETWORK)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) expected_same_vlan = [(pnet.NETWORK_TYPE, 'vlan'), (pnet.SEGMENTATION_ID, 43), (pnet.PHYSICAL_NETWORK, 'dvs-uuid-2')] providernet_args_1 = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 43, pnet.PHYSICAL_NETWORK: 'dvs-uuid-2'} with self.network(name=name, providernet_args=providernet_args_1, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) as net1: for k, v in expected_same_vlan: self.assertEqual(net1['network'][k], v) def test_create_vlan_network_with_multiple_dvs(self): name = 'multi-dvs-vlan-net' providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 100, pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-3'} p = directory.get_plugin() with mock.patch.object( p, '_create_vlan_network_at_backend', # Return three netmorefs as side effect side_effect=[_uuid(), _uuid(), _uuid()]) as vlan_net_call: with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)): # _create_vlan_network_at_backend is expected to be called # three times since we have three DVS IDs in the physical # network attribute. self.assertEqual(3, vlan_net_call.call_count) def test_create_vlan_network_with_multiple_dvs_backend_failure(self): net_data = {'name': 'vlan-net', 'tenant_id': self._tenant_id, pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 100, pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-3'} network = {'network': net_data} p = directory.get_plugin() with mock.patch.object( p, '_create_vlan_network_at_backend', # Return two successful netmorefs and fail on the backend # for the third netmoref creation as side effect. 
side_effect=[_uuid(), _uuid(), nsxv_exc.NsxPluginException(err_msg='')]): with mock.patch.object( p, '_delete_backend_network') as delete_net_call: self.assertRaises(nsxv_exc.NsxPluginException, p.create_network, context.get_admin_context(), network) # Two successfully created port groups should be rolled back # on the failure of third port group creation. self.assertEqual(2, delete_net_call.call_count) def test_create_vlan_network_with_multiple_dvs_not_found_failure(self): net_data = {'name': 'vlan-net', 'tenant_id': self._tenant_id, pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 100, pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-3'} network = {'network': net_data} p = directory.get_plugin() with mock.patch.object( p, '_validate_provider_create', side_effect=[nsxv_exc.NsxResourceNotFound(res_id='dvs-2', res_name='dvs_id')]): with mock.patch.object( p, '_create_vlan_network_at_backend') as create_net_call: self.assertRaises(nsxv_exc.NsxResourceNotFound, p.create_network, context.get_admin_context(), network) # Verify no port group is created on the backend. self.assertEqual(0, create_net_call.call_count) def test_create_vlan_network_with_multiple_dvs_ignore_duplicate_dvs(self): name = 'multi-dvs-vlan-net' providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 100, pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-1'} p = directory.get_plugin() with mock.patch.object( p, '_create_vlan_network_at_backend', # Return two netmorefs as side effect side_effect=[_uuid(), _uuid()]) as vlan_net_call: with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)): # _create_vlan_network_at_backend is expected to be called # two times since we have only two unique DVS IDs in the # physical network attribute. 
self.assertEqual(2, vlan_net_call.call_count) def test_update_vlan_network_add_dvs(self): name = 'multi-dvs-vlan-net' providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 100, pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2'} p = directory.get_plugin() with mock.patch.object( p, '_create_vlan_network_at_backend', # Return 3 netmorefs as side effect side_effect=[_uuid(), _uuid(), _uuid()]) as vlan_net_call: with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) as net: # _create_vlan_network_at_backend is expected to be called # 2 times since we have 2 DVS IDs in the physical # network attribute. self.assertEqual(2, vlan_net_call.call_count) self.assertEqual('dvs-1, dvs-2', net['network'][pnet.PHYSICAL_NETWORK]) # Add another dvs data = {'network': {pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-3'}} req = self.new_update_request('networks', data, net['network']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(3, vlan_net_call.call_count) self.assertEqual('dvs-1, dvs-2, dvs-3', res['network'][pnet.PHYSICAL_NETWORK]) # make sure it is updates also in the DB req = self.new_show_request('networks', net['network']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual('dvs-1, dvs-2, dvs-3', res['network'][pnet.PHYSICAL_NETWORK]) # update again - with no real change req = self.new_update_request('networks', data, net['network']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(3, vlan_net_call.call_count) self.assertEqual('dvs-1, dvs-2, dvs-3', res['network'][pnet.PHYSICAL_NETWORK]) def test_update_vlan_network_remove_dvs(self): name = 'multi-dvs-vlan-net' providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 100, pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2'} p = directory.get_plugin() with mock.patch.object( p, '_create_vlan_network_at_backend', # Return 2 netmorefs as side effect 
side_effect=[_uuid(), _uuid()]) as vlan_net_call,\ mock.patch.object( p, '_delete_backend_network') as del_net: with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) as net: # _create_vlan_network_at_backend is expected to be called # 2 times since we have 2 DVS IDs in the physical # network attribute. self.assertEqual(2, vlan_net_call.call_count) self.assertEqual('dvs-1, dvs-2', net['network'][pnet.PHYSICAL_NETWORK]) # Keep only dvs-1 (Remove dvs-2) data = {'network': {pnet.PHYSICAL_NETWORK: 'dvs-1'}} req = self.new_update_request('networks', data, net['network']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(2, vlan_net_call.call_count) del_net.assert_called_once() self.assertEqual('dvs-1', res['network'][pnet.PHYSICAL_NETWORK]) # make sure it is updates also in the DB req = self.new_show_request('networks', net['network']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual('dvs-1', res['network'][pnet.PHYSICAL_NETWORK]) def test_get_dvs_ids_for_multiple_dvs_vlan_network(self): p = directory.get_plugin() default_dvs = 'fake_dvs_id' # If no DVS-ID is provided as part of physical network, return # global DVS-ID configured in nsx.ini physical_network = constants.ATTR_NOT_SPECIFIED self.assertEqual(['fake_dvs_id'], p._get_dvs_ids( physical_network, default_dvs)) # If DVS-IDs are provided as part of physical network as a comma # separated string, return them as a list of DVS-IDs. physical_network = 'dvs-1,dvs-2, dvs-3' expected_dvs_ids = ['dvs-1', 'dvs-2', 'dvs-3'] self.assertEqual(expected_dvs_ids, sorted(p._get_dvs_ids(physical_network, default_dvs))) # Ignore extra commas ',' in the physical_network attribute. 
physical_network = ',,,dvs-1,dvs-2,, dvs-3,' expected_dvs_ids = ['dvs-1', 'dvs-2', 'dvs-3'] self.assertEqual(expected_dvs_ids, sorted(p._get_dvs_ids(physical_network, default_dvs))) # Ignore duplicate DVS-IDs in the physical_network attribute. physical_network = ',,,dvs-1,dvs-2,, dvs-2,' expected_dvs_ids = ['dvs-1', 'dvs-2'] self.assertEqual(expected_dvs_ids, sorted(p._get_dvs_ids(physical_network, default_dvs))) def test_create_vxlan_with_tz_provider_network(self): name = 'provider_net_vxlan' expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, 'vxlan'), (pnet.PHYSICAL_NETWORK, 'vdnscope-2')] providernet_args = {pnet.NETWORK_TYPE: 'vxlan', pnet.PHYSICAL_NETWORK: 'vdnscope-2'} with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_vxlan_with_tz_provider_network_not_found_fail(self): name = 'provider_net_vxlan' data = {'network': { 'name': name, 'tenant_id': self._tenant_id, pnet.SEGMENTATION_ID: constants.ATTR_NOT_SPECIFIED, pnet.NETWORK_TYPE: 'vxlan', pnet.PHYSICAL_NETWORK: 'vdnscope-2'}} p = directory.get_plugin() with mock.patch.object(p.nsx_v.vcns, 'validate_vdn_scope', side_effect=[False]): self.assertRaises(nsxv_exc.NsxResourceNotFound, p.create_network, context.get_admin_context(), data) def test_create_network_with_qos_no_dvs_fail(self): # network creation should fail if the qos policy parameter exists, # and no use_dvs_features configured data = {'network': { 'name': 'test-qos', 'tenant_id': self._tenant_id, 'qos_policy_id': _uuid()}} plugin = directory.get_plugin() self.assertRaises(n_exc.InvalidInput, plugin.create_network, context.get_admin_context(), data) def test_update_network_with_qos_no_dvs_fail(self): # network update should fail if the qos policy parameter exists, # and no use_dvs_features configured data = {'network': 
{'qos_policy_id': _uuid()}} with self.network() as net: plugin = directory.get_plugin() self.assertRaises(n_exc.InvalidInput, plugin.update_network, context.get_admin_context(), net['network']['id'], data) @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') @mock.patch.object(qos_utils.NsxVQosRule, '_init_from_policy_id') def test_create_network_with_qos_policy(self, fake_init_from_policy, fake_dvs_update): # enable dvs features to allow policy with QOS plugin = self._get_core_plugin_with_dvs() ctx = context.get_admin_context() # Mark init as complete, as otherwise QoS won't be called plugin.init_is_complete = True # fake policy id policy_id = _uuid() data = {'network': { 'name': 'test-qos', 'tenant_id': self._tenant_id, 'qos_policy_id': policy_id, 'port_security_enabled': False, 'admin_state_up': False, 'shared': False }} with mock.patch('vmware_nsx.services.qos.common.utils.' 'get_network_policy_id', return_value=policy_id): # create the network - should succeed and translate the policy id net = plugin.create_network(ctx, data) self.assertEqual(policy_id, net[qos_consts.QOS_POLICY_ID]) fake_init_from_policy.assert_called_once_with(ctx, policy_id) self.assertTrue(fake_dvs_update.called) # Get network should also return the qos policy id net2 = plugin.get_network(ctx, net['id']) self.assertEqual(policy_id, net2[qos_consts.QOS_POLICY_ID]) @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') @mock.patch.object(qos_utils.NsxVQosRule, '_init_from_policy_id') def test_update_network_with_qos_policy(self, fake_init_from_policy, fake_dvs_update): # enable dvs features to allow policy with QOS plugin = self._get_core_plugin_with_dvs() ctx = context.get_admin_context() # create the network without qos policy data = {'network': { 'name': 'test-qos', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False }} net = plugin.create_network(ctx, data) # fake policy id policy_id = _uuid() 
data['network']['qos_policy_id'] = policy_id # update the network - should succeed and translate the policy id with mock.patch('vmware_nsx.services.qos.common.utils.' 'get_network_policy_id', return_value=policy_id): res = plugin.update_network(ctx, net['id'], data) self.assertEqual(policy_id, res[qos_consts.QOS_POLICY_ID]) fake_init_from_policy.assert_called_once_with(ctx, policy_id) self.assertTrue(fake_dvs_update.called) # Get network should also return the qos policy id net2 = plugin.get_network(ctx, net['id']) self.assertEqual(policy_id, net2[qos_consts.QOS_POLICY_ID]) def test_create_network_with_bad_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-qos', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False, 'availability_zone_hints': ['bad_hint'] }} self.assertRaises(n_exc.NeutronException, p.create_network, ctx, data) def test_create_network_with_az_hint(self): az_name = 'az7' set_az_in_config(az_name) p = directory.get_plugin() p._availability_zones_data = nsx_az.NsxVAvailabilityZones() ctx = context.get_admin_context() data = {'network': { 'name': 'test-qos', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False, 'availability_zone_hints': [az_name] }} # network creation should succeed net = p.create_network(ctx, data) self.assertEqual([az_name], net['availability_zone_hints']) # the availability zone is still empty until subnet creation self.assertEqual([], net['availability_zones']) class TestVnicIndex(NsxVPluginV2TestCase, test_vnic_index.VnicIndexDbTestCase): def test_update_port_twice_with_the_same_index(self): """Tests that updates which does not modify the port vnic index association do not produce any errors """ with self.subnet() as subnet: with self.port(subnet=subnet) as port: res = self._port_index_update(port['port']['id'], 2) self.assertEqual(2, res['port'][ext_vnic_idx.VNIC_INDEX]) res 
= self._port_index_update(port['port']['id'], 2) self.assertEqual(2, res['port'][ext_vnic_idx.VNIC_INDEX]) class TestPortsV2(NsxVPluginV2TestCase, test_plugin.TestPortsV2, test_bindings.PortBindingsTestCase, test_bindings.PortBindingsHostTestCaseMixin, test_bindings.PortBindingsVnicTestCaseMixin): VIF_TYPE = nsx_constants.VIF_TYPE_DVS HAS_PORT_FILTER = True def test_is_mac_in_use(self): ctx = context.get_admin_context() with self.port() as port: net_id = port['port']['network_id'] mac = port['port']['mac_address'] self.assertTrue(self.plugin._is_mac_in_use(ctx, net_id, mac)) mac2 = '00:22:00:44:00:66' # other mac, same network self.assertFalse(self.plugin._is_mac_in_use(ctx, net_id, mac2)) net_id2 = port['port']['id'] # other net uuid, same mac self.assertTrue(self.plugin._is_mac_in_use(ctx, net_id2, mac)) def test_duplicate_mac_generation(self): # simulate duplicate mac generation to make sure DBDuplicate is retried responses = ['12:34:56:78:00:00', '12:34:56:78:00:00', '12:34:56:78:00:01'] with mock.patch.object(net, 'get_random_mac', side_effect=responses) as grand_mac: with self.subnet(enable_dhcp=False) as s: with self.port(subnet=s) as p1, self.port(subnet=s) as p2: self.assertEqual('12:34:56:78:00:00', p1['port']['mac_address']) self.assertEqual('12:34:56:78:00:01', p2['port']['mac_address']) self.assertEqual(3, grand_mac.call_count) def test_get_ports_count(self): with self.port(), self.port(), self.port(), self.port() as p: tenid = p['port']['tenant_id'] ctx = context.Context(user_id=None, tenant_id=tenid, is_admin=False) pl = directory.get_plugin() count = pl.get_ports_count(ctx, filters={'tenant_id': [tenid]}) # Each port above has subnet => we have an additional port # for DHCP self.assertEqual(8, count) def test_requested_ips_only(self): with self.subnet(enable_dhcp=False) as subnet: fixed_ip_data = [{'ip_address': '10.0.0.2', 'subnet_id': subnet['subnet']['id']}] with self.port(subnet=subnet, fixed_ips=fixed_ip_data) as port: ips = 
port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.2', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) ips_only = ['10.0.0.18', '10.0.0.20', '10.0.0.22', '10.0.0.21', '10.0.0.3', '10.0.0.17', '10.0.0.19'] ports_to_delete = [] for i in ips_only: kwargs = {"fixed_ips": [{'ip_address': i}]} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port = self.deserialize(self.fmt, res) ports_to_delete.append(port) ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(i, ips[0]['ip_address']) self.assertEqual(subnet['subnet']['id'], ips[0]['subnet_id']) for p in ports_to_delete: self._delete('ports', p['port']['id']) def test_delete_network_port_exists_owned_by_network_race(self): self.skipTest('Skip need to address in future') def test_create_port_with_too_many_fixed_ips(self): self.skipTest('DHCP only supports one binding') def test_create_port_invalid_fixed_ip_address_v6_pd_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_invalid_fixed_ip_address_v6_pd_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_invalid_subnet_v6_pd_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_mac_v6_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_invalid_fixed_ip_address_v6_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_excluding_ipv6_slaac_subnet_from_fixed_ips(self): self.skipTest('No DHCP v6 Support yet') def test_requested_subnet_id_v6_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_ip_allocation_for_ipv6_subnet_slaac_address_mode(self): self.skipTest('No DHCP v6 Support yet') def test_requested_fixed_ip_address_v6_slaac_router_iface(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_with_ipv6_slaac_subnet_in_fixed_ips(self): self.skipTest('No DHCP v6 Support yet') def 
test_requested_invalid_fixed_ip_address_v6_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_delete_port_with_ipv6_slaac_address(self): self.skipTest('No DHCP v6 Support yet') def test_ip_allocation_for_ipv6_2_subnet_slaac_mode(self): self.skipTest('No DHCP v6 Support yet') def _test_create_port_with_ipv6_subnet_in_fixed_ips(self, addr_mode, ipv6_pd=False): self.skipTest('No DHCP v6 Support yet') def test_update_port_with_new_ipv6_slaac_subnet_in_fixed_ips(self): self.skipTest('No DHCP v6 Support yet') def test_create_port_anticipating_allocation(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_list_ports(self): # for this test we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) with self.subnet(enable_dhcp=False) as subnet,\ self.port(subnet) as port1,\ self.port(subnet) as port2,\ self.port(subnet) as port3: self._test_list_resources('port', [port1, port2, port3]) def test_list_ports_public_network(self): with self.network(shared=True) as network: with self.subnet(network, enable_dhcp=False) as subnet,\ self.port(subnet, tenant_id='tenant_1') as port1,\ self.port(subnet, tenant_id='tenant_2') as port2: # Admin request - must return both ports self._test_list_resources('port', [port1, port2]) # Tenant_1 request - must return single port q_context = context.Context('', 'tenant_1') self._test_list_resources('port', [port1], neutron_context=q_context) # Tenant_2 request - must return single port q_context = context.Context('', 'tenant_2') self._test_list_resources('port', [port2], neutron_context=q_context) def test_list_ports_with_pagination_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=test_plugin._fake_get_pagination_helper) helper_patcher.start() cfg.CONF.set_default('allow_overlapping_ips', True) with self.subnet(enable_dhcp=False) as subnet,\ self.port(subnet, mac_address='00:00:00:00:00:01') as port1,\ self.port(subnet, 
mac_address='00:00:00:00:00:02') as port2,\ self.port(subnet, mac_address='00:00:00:00:00:03') as port3: self._test_list_with_pagination('port', (port1, port2, port3), ('mac_address', 'asc'), 2, 2) def test_list_ports_with_pagination_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") cfg.CONF.set_default('allow_overlapping_ips', True) with self.subnet(enable_dhcp=False) as subnet,\ self.port(subnet, mac_address='00:00:00:00:00:01') as port1,\ self.port(subnet, mac_address='00:00:00:00:00:02') as port2,\ self.port(subnet, mac_address='00:00:00:00:00:03') as port3: self._test_list_with_pagination('port', (port1, port2, port3), ('mac_address', 'asc'), 2, 2) def test_list_ports_with_sort_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_sorting_helper', new=test_plugin._fake_get_sorting_helper) helper_patcher.start() cfg.CONF.set_default('allow_overlapping_ips', True) with self.subnet(enable_dhcp=False) as subnet,\ self.port(subnet, admin_state_up='True', mac_address='00:00:00:00:00:01') as port1,\ self.port(subnet, admin_state_up='False', mac_address='00:00:00:00:00:02') as port2,\ self.port(subnet, admin_state_up='False', mac_address='00:00:00:00:00:03') as port3: self._test_list_with_sort('port', (port3, port2, port1), [('admin_state_up', 'asc'), ('mac_address', 'desc')]) def test_list_ports_with_sort_native(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") cfg.CONF.set_default('allow_overlapping_ips', True) with self.subnet(enable_dhcp=False) as subnet,\ self.port(subnet, admin_state_up='True', mac_address='00:00:00:00:00:01') as port1,\ self.port(subnet, admin_state_up='False', mac_address='00:00:00:00:00:02') as port2,\ self.port(subnet, admin_state_up='False', mac_address='00:00:00:00:00:03') as port3: self._test_list_with_sort('port', (port3, port2, port1), [('admin_state_up', 'asc'), ('mac_address', 'desc')]) def 
test_update_port_delete_ip(self): # This test case overrides the default because the nsx plugin # implements port_security/security groups and it is not allowed # to remove an ip address from a port unless the security group # is first removed. with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [], secgrp.SECURITYGROUPS: []}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(res['port']['admin_state_up'], data['port']['admin_state_up']) self.assertEqual(res['port']['fixed_ips'], data['port']['fixed_ips']) def _update_port_index(self, port_id, device_id, index): data = {'port': {'device_id': device_id, 'vnic_index': index}} req = self.new_update_request('ports', data, port_id) res = self.deserialize('json', req.get_response(self.api)) return res @mock.patch.object(edge_utils.EdgeManager, 'delete_dhcp_binding') def _test_update_port_index_and_spoofguard( self, ip_version, subnet_cidr, port_ip, port_mac, ipv6_lla, delete_dhcp_binding): q_context = context.Context('', 'tenant_1') device_id = _uuid() with self.subnet(ip_version=ip_version, enable_dhcp=(False if ip_version == 6 else True), cidr=subnet_cidr, gateway_ip=None) as subnet: fixed_ip_data = [{'ip_address': port_ip, 'subnet_id': subnet['subnet']['id']}] with self.port(subnet=subnet, device_id=device_id, mac_address=port_mac, fixed_ips=fixed_ip_data, device_owner='compute:None') as port: self.assertIsNone(port['port']['vnic_index']) self.fc2.approve_assigned_addresses = ( mock.Mock().approve_assigned_addresses) self.fc2.publish_assigned_addresses = ( mock.Mock().publish_assigned_addresses) self.fc2.inactivate_vnic_assigned_addresses = ( mock.Mock().inactivate_vnic_assigned_addresses) vnic_index = 3 res = self._update_port_index( port['port']['id'], device_id, vnic_index) self.assertEqual(vnic_index, res['port']['vnic_index']) policy_id = 
nsxv_db.get_spoofguard_policy_id( q_context.session, port['port']['network_id']) vnic_id = '%s.%03d' % (device_id, vnic_index) # Verify that the spoofguard policy assigned and published expected_ips = [port_ip] if ipv6_lla: expected_ips.append(ipv6_lla) (self.fc2.approve_assigned_addresses. assert_called_once_with(policy_id, vnic_id, port_mac, expected_ips)) (self.fc2.publish_assigned_addresses. assert_called_once_with(policy_id, vnic_id)) # Updating the vnic_index to None implies the vnic does # no longer obtain the addresses associated with this port, # we need to inactivate previous addresses configurations for # this vnic in the context of this network spoofguard policy. res = self._update_port_index(port['port']['id'], '', None) (self.fc2.inactivate_vnic_assigned_addresses. assert_called_once_with(policy_id, vnic_id)) self.assertTrue(delete_dhcp_binding.called) def test_update_port_index(self): ip_version = 4 subnet_cidr = '10.0.0.0/24' port_ip = '10.0.0.8' port_mac = '00:00:00:00:00:02' ipv6_lla = None self._test_update_port_index_and_spoofguard( ip_version, subnet_cidr, port_ip, port_mac, ipv6_lla) def test_update_port_index_ipv6(self): ip_version = 6 subnet_cidr = 'ae80::/64' port_mac = '00:00:00:00:00:02' ipv6_lla = 'fe80::200:ff:fe00:2' port_ip = 'ae80::2' self._test_update_port_index_and_spoofguard( ip_version, subnet_cidr, port_ip, port_mac, ipv6_lla) def test_update_port_with_compute_device_owner(self): """ Test that DHCP binding is created when ports 'device_owner' is updated to compute, for example when attaching an interface to a instance with existing port. 
""" with self.port() as port: with mock.patch(PLUGIN_NAME + '._create_dhcp_static_binding'): update = {'port': {'device_owner'}} self.new_update_request('ports', update, port['port']['id']) def test_ports_vif_host(self): cfg.CONF.set_default('allow_overlapping_ips', True) host_arg = {portbindings.HOST_ID: self.hostname} with self.subnet(enable_dhcp=False) as subnet,\ self.port(subnet, name='name1', arg_list=(portbindings.HOST_ID,), **host_arg),\ self.port(subnet, name='name2'): ctx = context.get_admin_context() ports = self._list('ports', neutron_context=ctx)['ports'] self.assertEqual(2, len(ports)) for port in ports: if port['name'] == 'name1': self._check_response_portbindings_host(port) else: self.assertFalse(port[portbindings.HOST_ID]) # By default user is admin - now test non admin user ctx = context.Context(user_id=None, tenant_id=self._tenant_id, is_admin=False) ports = self._list('ports', neutron_context=ctx)['ports'] self.assertEqual(2, len(ports)) for non_admin_port in ports: self._check_response_no_portbindings_host(non_admin_port) def test_ports_vif_host_update(self): cfg.CONF.set_default('allow_overlapping_ips', True) host_arg = {portbindings.HOST_ID: self.hostname} with self.subnet(enable_dhcp=False) as subnet,\ self.port(subnet, name='name1', arg_list=(portbindings.HOST_ID,), **host_arg) as port1,\ self.port(subnet, name='name2') as port2: data = {'port': {portbindings.HOST_ID: 'testhosttemp'}} req = self.new_update_request( 'ports', data, port1['port']['id']) req.get_response(self.api) req = self.new_update_request( 'ports', data, port2['port']['id']) ctx = context.get_admin_context() req.get_response(self.api) ports = self._list('ports', neutron_context=ctx)['ports'] self.assertEqual(2, len(ports)) for port in ports: self.assertEqual('testhosttemp', port[portbindings.HOST_ID]) def test_ports_vif_details(self): plugin = directory.get_plugin() cfg.CONF.set_default('allow_overlapping_ips', True) with self.subnet(enable_dhcp=False) as subnet,\ 
self.port(subnet), self.port(subnet): ctx = context.get_admin_context() ports = plugin.get_ports(ctx) self.assertEqual(len(ports), 2) for port in ports: self._check_response_portbindings(port) # By default user is admin - now test non admin user ctx = self._get_non_admin_context() ports = self._list('ports', neutron_context=ctx)['ports'] self.assertEqual(len(ports), 2) for non_admin_port in ports: self._check_response_no_portbindings(non_admin_port) def test_ports_vnic_type(self): cfg.CONF.set_default('allow_overlapping_ips', True) vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type} with self.subnet(enable_dhcp=False) as subnet,\ self.port(subnet, name='name1', arg_list=(portbindings.VNIC_TYPE,), **vnic_arg),\ self.port(subnet, name='name2'): ctx = context.get_admin_context() ports = self._list('ports', neutron_context=ctx)['ports'] self.assertEqual(2, len(ports)) for port in ports: if port['name'] == 'name1': self._check_response_portbindings_vnic_type(port) else: self.assertEqual(portbindings.VNIC_NORMAL, port[portbindings.VNIC_TYPE]) # By default user is admin - now test non admin user ctx = context.Context(user_id=None, tenant_id=self._tenant_id, is_admin=False) ports = self._list('ports', neutron_context=ctx)['ports'] self.assertEqual(2, len(ports)) for non_admin_port in ports: self._check_response_portbindings_vnic_type(non_admin_port) def test_ports_vnic_type_list(self): cfg.CONF.set_default('allow_overlapping_ips', True) vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type} with self.subnet(enable_dhcp=False) as subnet,\ self.port(subnet, name='name1', arg_list=(portbindings.VNIC_TYPE,), **vnic_arg) as port1,\ self.port(subnet, name='name2') as port2,\ self.port(subnet, name='name3', arg_list=(portbindings.VNIC_TYPE,), **vnic_arg) as port3: self._test_list_resources('port', (port1, port2, port3), query_params='%s=%s' % ( portbindings.VNIC_TYPE, self.vnic_type)) def test_port_invalid_vnic_type(self): with self._test_create_direct_network(vlan_id=7) as network: 
kwargs = {portbindings.VNIC_TYPE: 'invalid', psec.PORTSECURITY: False} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) def test_range_allocation(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_requested_subnet_id_v4_and_v6(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_update_port_update_ip(self): """Test update of port IP. Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10. """ with self.subnet(enable_dhcp=False) as subnet: fixed_ip_data = [{'ip_address': '10.0.0.2', 'subnet_id': subnet['subnet']['id']}] with self.port(subnet=subnet, fixed_ips=fixed_ip_data) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.2', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.10', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) def test_update_port_update_ips(self): """Update IP and associate new IP on port. Check a port update with the specified subnet_id's. A IP address will be allocated for each subnet_id. 
""" with self.subnet(enable_dhcp=False) as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(data['port']['admin_state_up'], res['port']['admin_state_up']) ips = res['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.3', ips[0]['ip_address'], '10.0.0.3') self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) def test_update_port_update_ip_dhcp(self): #Test updating a port IP when the device owner is DHCP with self.subnet(enable_dhcp=False) as subnet: with self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_DHCP) as port: data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}]}} plugin = directory.get_plugin() ctx = context.get_admin_context() with mock.patch.object( plugin.edge_manager, 'update_dhcp_edge_service') as update_dhcp: plugin.update_port(ctx, port['port']['id'], data) self.assertTrue(update_dhcp.called) def test_update_port_update_ip_compute(self): #Test that updating a port IP succeed if the device owner starts #with compute. 
owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'xxx' with self.subnet(enable_dhcp=False) as subnet: with self.port(subnet=subnet, device_id=_uuid(), device_owner=owner) as port: data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}]}} plugin = directory.get_plugin() with mock.patch.object( plugin.edge_manager, 'delete_dhcp_binding') as delete_dhcp: with mock.patch.object( plugin.edge_manager, 'create_static_binding') as create_static: with mock.patch.object( plugin.edge_manager, 'create_dhcp_bindings') as create_dhcp: plugin.update_port(context.get_admin_context(), port['port']['id'], data) self.assertTrue(delete_dhcp.called) self.assertTrue(create_static.called) self.assertTrue(create_dhcp.called) def test_update_port_update_ip_and_owner_fail(self): #Test that updating a port IP and device owner at the same #transaction fails with self.subnet(enable_dhcp=False) as subnet: with self.port(subnet=subnet, device_owner='aaa') as port: data = {'port': {'device_owner': 'bbb', 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}]}} plugin = directory.get_plugin() self.assertRaises(n_exc.BadRequest, plugin.update_port, context.get_admin_context(), port['port']['id'], data) def test_update_port_update_ip_router(self): #Test that updating a port IP succeed if the device owner is a router owner = constants.DEVICE_OWNER_ROUTER_GW router_id = _uuid() old_ip = '10.0.0.3' new_ip = '10.0.0.10' with self.subnet(enable_dhcp=False) as subnet: with self.port(subnet=subnet, device_id=router_id, device_owner=owner, fixed_ips=[{'ip_address': old_ip}]) as port: data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} plugin = directory.get_plugin() ctx = context.get_admin_context() router_obj = router_driver.RouterSharedDriver(plugin) with mock.patch.object(plugin, '_find_router_driver', return_value=router_obj): with mock.patch.object( router_obj, 'update_router_interface_ip') as 
update_router: port_id = port['port']['id'] plugin.update_port(ctx, port_id, data) net_id = port['port']['network_id'] update_router.assert_called_once_with( ctx, router_id, port_id, net_id, old_ip, new_ip, "255.255.255.0") def test_update_port_update_ip_unattached_router(self): #Test that updating a port IP succeed if the device owner is a router #and the shared router is not attached to any edge yet owner = constants.DEVICE_OWNER_ROUTER_GW router_id = _uuid() old_ip = '10.0.0.3' new_ip = '10.0.0.10' with self.subnet(enable_dhcp=False) as subnet: with self.port(subnet=subnet, device_id=router_id, device_owner=owner, fixed_ips=[{'ip_address': old_ip}]) as port: data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} plugin = directory.get_plugin() ctx = context.get_admin_context() router_obj = router_driver.RouterSharedDriver(plugin) with mock.patch.object(plugin, '_find_router_driver', return_value=router_obj): # make sure the router will not be attached to an edge with mock.patch.object( edge_utils, 'get_router_edge_id', return_value=None): port_id = port['port']['id'] # The actual test here is that this call does not # raise an exception new_port = plugin.update_port(ctx, port_id, data) ips = new_port['fixed_ips'] self.assertEqual(len(ips), 1) self.assertEqual(ips[0]['ip_address'], new_ip) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) def test_update_port_delete_ip_router(self): #Test that deleting a port IP succeed if the device owner is a router owner = constants.DEVICE_OWNER_ROUTER_GW router_id = _uuid() old_ip = '10.0.0.3' with self.subnet(enable_dhcp=False) as subnet: with self.port(subnet=subnet, device_id=router_id, device_owner=owner, fixed_ips=[{'ip_address': old_ip}]) as port: data = {'port': {'fixed_ips': []}} plugin = directory.get_plugin() ctx = context.get_admin_context() router_obj = router_driver.RouterSharedDriver(plugin) with mock.patch.object(plugin, '_find_router_driver', 
return_value=router_obj): with mock.patch.object( router_obj, 'update_router_interface_ip') as update_router: port_id = port['port']['id'] plugin.update_port(ctx, port_id, data) net_id = port['port']['network_id'] update_router.assert_called_once_with( ctx, router_id, port_id, net_id, old_ip, None, None) def test_update_port_add_additional_ip(self): """Test update of port with additional IP fails.""" with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_port_additional_ip(self): """Test that creation of port with additional IP fails.""" with self.subnet() as subnet: data = {'port': {'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]}} port_req = self.new_create_request('ports', data) res = port_req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_port_update_ip_address_only(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_requested_invalid_fixed_ips(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_requested_subnet_id_v4_and_v6_slaac(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_update_dhcp_port_with_exceeding_fixed_ips(self): self.skipTest('Updating dhcp port IP is not supported') def test_create_router_port_ipv4_and_ipv6_slaac_no_fixed_ips(self): self.skipTest('No DHCP v6 Support yet') def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self): # This test should fail as the NSX-v plugin should cause Neutron to # return a 400 status code with 
testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: super(TestPortsV2, self).\ test_create_port_with_multiple_ipv4_and_ipv6_subnets() self.assertEqual(ctx_manager.exception.code, 400) def test_list_ports_for_network_owner(self): with self.network(tenant_id='tenant_1') as network: with self.subnet(network, enable_dhcp=False) as subnet: with self.port(subnet, tenant_id='tenant_1') as port1,\ self.port(subnet, tenant_id='tenant_2') as port2: # network owner request, should return all ports port_res = self._list_ports( 'json', set_context=True, tenant_id='tenant_1') port_list = self.deserialize('json', port_res)['ports'] port_ids = [p['id'] for p in port_list] self.assertEqual(2, len(port_list)) self.assertIn(port1['port']['id'], port_ids) self.assertIn(port2['port']['id'], port_ids) # another tenant request, only return ports belong to it port_res = self._list_ports( 'json', set_context=True, tenant_id='tenant_2') port_list = self.deserialize('json', port_res)['ports'] port_ids = [p['id'] for p in port_list] self.assertEqual(1, len(port_list)) self.assertNotIn(port1['port']['id'], port_ids) self.assertIn(port2['port']['id'], port_ids) def test_mac_duplication(self): # create 2 networks res = self._create_network(fmt=self.fmt, name='net1', admin_state_up=True) network1 = self.deserialize(self.fmt, res) net1_id = network1['network']['id'] res = self._create_network(fmt=self.fmt, name='net2', admin_state_up=True) network2 = self.deserialize(self.fmt, res) net2_id = network2['network']['id'] # create a port on the first network mac = '33:00:00:00:00:01' res = self._create_port(self.fmt, net_id=net1_id, arg_list=('mac_address',), mac_address=mac) port1 = self.deserialize('json', res) self.assertEqual(mac, port1['port']['mac_address']) # creating another port on a different network with the same mac # should fail res = self._create_port(self.fmt, net_id=net2_id, arg_list=('mac_address',), mac_address=mac) port2 = self.deserialize('json', res) 
self.assertEqual("MacAddressInUse", port2['NeutronError']['type']) def _test_create_direct_network(self, vlan_id=0): net_type = vlan_id and 'vlan' or 'flat' name = 'direct_net' providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: 'tzuuid'} if vlan_id: providernet_args[pnet.SEGMENTATION_ID] = vlan_id return self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)) def test_create_port_vnic_direct(self): with self._test_create_direct_network(vlan_id=7) as network: # Check that port security conflicts kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, psec.PORTSECURITY: True} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) # Check that security group conflicts kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, 'security_groups': [ '4cd70774-cc67-4a87-9b39-7d1db38eb087'], psec.PORTSECURITY: False} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) # All is kosher so we can create the port kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE,), **kwargs) port = self.deserialize('json', res) self.assertEqual("direct", port['port'][portbindings.VNIC_TYPE]) def test_create_port_vnic_direct_invalid_network(self): with self.network(name='not vlan/flat') as net: kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, psec.PORTSECURITY: False} net_id = net['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) 
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_vnic_direct(self): with self._test_create_direct_network(vlan_id=7) as network: with self.subnet(network=network) as subnet: with self.port(subnet=subnet) as port: # need to do two updates as the update for port security # disabled requires that it can only change 2 items data = {'port': {psec.PORTSECURITY: False, 'security_groups': []}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(portbindings.VNIC_NORMAL, res['port'][portbindings.VNIC_TYPE]) data = {'port': {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(portbindings.VNIC_DIRECT, res['port'][portbindings.VNIC_TYPE]) def test_delete_network_port_exists_owned_by_network_port_not_found(self): """Tests that we continue to gracefully delete the network even if a neutron:dhcp-owned port was deleted concurrently. """ res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) network_id = network['network']['id'] self._create_port(self.fmt, network_id, device_owner=constants.DEVICE_OWNER_DHCP) # Raise PortNotFound when trying to delete the port to simulate a # concurrent delete race; note that we actually have to delete the port # "out of band" otherwise deleting the network will fail because of # constraints in the data model. plugin = directory.get_plugin() orig_delete = plugin.delete_port def fake_delete_port(context, id, force_delete_dhcp=False): # Delete the port for real from the database and then raise # PortNotFound to simulate the race. 
self.assertIsNone(orig_delete( context, id, force_delete_dhcp=force_delete_dhcp)) raise n_exc.PortNotFound(port_id=id) p = mock.patch.object(plugin, 'delete_port') mock_del_port = p.start() mock_del_port.side_effect = fake_delete_port req = self.new_delete_request('networks', network_id) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def test_create_port_sec_disabled_and_provider_rule(self): with self.network() as network: kwargs = {'provider_security_groups': [uuidutils.generate_uuid()], 'port_security_enabled': False} res = self._create_port(self.fmt, network['network']['id'], arg_list=('provider_security_groups', 'port_security_enabled'), **kwargs) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_port_sec_disabled_and_provider_rule(self): with self.port() as port: with mock.patch( PLUGIN_NAME + '._get_provider_security_groups_on_port'): data = {'port': {'port_security_enabled': False}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual("PortSecurityAndIPRequiredForSecurityGroups", res['NeutronError']['type']) class TestSubnetsV2(NsxVPluginV2TestCase, test_plugin.TestSubnetsV2): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): super(TestSubnetsV2, self).setUp() self.context = context.get_admin_context() def _test_subnet_update_ipv4_and_ipv6_pd_subnets(self, ra_addr_mode): self.skipTest('No DHCP v6 Support yet') def test__subnet_ipv6_not_supported(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'gateway': 'fe80::1', 'cidr': '2607:f0d0:1002:51::/64', 'ip_version': '6', 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def 
test_create_subnet_ipv6_gw_is_nw_end_addr_returns_201(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_out_of_cidr_global(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_pd_gw_values(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_port_on_network(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_dhcpv6_stateless_with_port_on_network(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self): self.skipTest('No DHCP v6 Support yet') def test_delete_subnet_ipv6_slaac_port_exists(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_out_of_cidr_lla(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_only_ip_version_v6(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_ipv6_address_mode_fails(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_with_v6_allocation_pool(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_with_v6_pd_allocation_pool(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_ipv6_ra_mode_fails(self): self.skipTest('No DHCP v6 Support yet') def test_delete_subnet_ipv6_slaac_router_port_exists(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_inconsistent_ipv6_gatewayv4(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_ipv6_attributes_fails(self): self.skipTest('No DHCP v6 Support yet') def 
test_update_subnet_ipv6_cannot_disable_dhcp(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_V6_pd_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_V6_pd_stateless(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_V6_pd_statefull(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_V6_pd_no_mode(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_ip_already_allocated(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_dhcpv6_stateless_with_ip_already_allocated(self): self.skipTest('No DHCP v6 Support yet') def _create_subnet_bulk(self, fmt, number, net_id, name, ip_version=4, **kwargs): base_data = {'subnet': {'network_id': net_id, 'ip_version': ip_version, 'enable_dhcp': False, 'tenant_id': self._tenant_id}} # auto-generate cidrs as they should not overlap overrides = dict((k, v) for (k, v) in zip(range(number), [{'cidr': "10.0.%s.0/24" % num} for num in range(number)])) kwargs.update({'override': overrides}) return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs) def test_create_subnet_nonzero_cidr(self): awkward_cidrs = [{'nonezero': '10.129.122.5/8', 'corrected': '10.0.0.0/8'}, {'nonezero': '11.129.122.5/15', 'corrected': '11.128.0.0/15'}, {'nonezero': '12.129.122.5/16', 'corrected': '12.129.0.0/16'}, {'nonezero': '13.129.122.5/18', 'corrected': '13.129.64.0/18'}, {'nonezero': '14.129.122.5/22', 'corrected': '14.129.120.0/22'}, {'nonezero': '15.129.122.5/24', 'corrected': '15.129.122.0/24'}, {'nonezero': '16.129.122.5/28', 'corrected': '16.129.122.0/28'}, ] for cidr in awkward_cidrs: with self.subnet(enable_dhcp=False, cidr=cidr['nonezero']) as subnet: # the API should accept and correct these cidrs for users self.assertEqual(cidr['corrected'], subnet['subnet']['cidr']) with self.subnet(enable_dhcp=False, cidr='17.129.122.5/32', gateway_ip=None) as subnet: self.assertEqual('17.129.122.5/32', 
subnet['subnet']['cidr']) def test_create_subnet_ipv6_attributes(self): # Expected to fail for now as we don't support IPv6 for NSXv cidr = "fe80::/80" with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(cidr=cidr) self.assertEqual(ctx_manager.exception.code, 400) def test_create_subnet_with_different_dhcp_server(self): self.mock_create_dhcp_service.stop() name = 'dvs-provider-net' providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 43, pnet.PHYSICAL_NETWORK: 'dvs-uuid'} with self.network(name=name, do_delete=False, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) as net: self._test_create_subnet(network=net, cidr='10.0.0.0/24') dhcp_router_id = (vcns_const.DHCP_EDGE_PREFIX + net['network']['id'])[:36] dhcp_server_id = nsxv_db.get_nsxv_router_binding( self.context.session, dhcp_router_id)['edge_id'] providernet_args_1 = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 43, pnet.PHYSICAL_NETWORK: 'dvs-uuid-1'} with self.network(name=name, do_delete=False, providernet_args=providernet_args_1, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) as net1: self._test_create_subnet(network=net1, cidr='10.0.1.0/24') router_id = (vcns_const.DHCP_EDGE_PREFIX + net1['network']['id'])[:36] dhcp_server_id_1 = nsxv_db.get_nsxv_router_binding( self.context.session, router_id)['edge_id'] self.assertNotEqual(dhcp_server_id, dhcp_server_id_1) def test_create_subnet_with_different_dhcp_by_flat_net(self): self.mock_create_dhcp_service.stop() name = 'flat-net' providernet_args = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'dvs-uuid'} with self.network(name=name, do_delete=False, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: self._test_create_subnet(network=net, cidr='10.0.0.0/24') dhcp_router_id = (vcns_const.DHCP_EDGE_PREFIX + net['network']['id'])[:36] dhcp_server_id = 
nsxv_db.get_nsxv_router_binding( self.context.session, dhcp_router_id)['edge_id'] providernet_args_1 = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'dvs-uuid'} with self.network(name=name, do_delete=False, providernet_args=providernet_args_1, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net1: self._test_create_subnet(network=net1, cidr='10.0.1.0/24') router_id = (vcns_const.DHCP_EDGE_PREFIX + net1['network']['id'])[:36] dhcp_server_id_1 = nsxv_db.get_nsxv_router_binding( self.context.session, router_id)['edge_id'] self.assertNotEqual(dhcp_server_id, dhcp_server_id_1) def test_create_subnets_with_different_tenants_non_shared(self): cfg.CONF.set_override('share_edges_between_tenants', False, group="nsxv") self.mock_create_dhcp_service.stop() # create 2 networks with different tenants with self.network(name='net1', tenant_id='fake1') as net1,\ self.network(name='net2', tenant_id='fake2') as net2: # create 2 non-overlapping subnets self._test_create_subnet(network=net1, cidr='10.0.0.0/24') router_id1 = (vcns_const.DHCP_EDGE_PREFIX + net1['network']['id'])[:36] edge1 = nsxv_db.get_nsxv_router_binding( self.context.session, router_id1)['edge_id'] self._test_create_subnet(network=net2, cidr='20.0.0.0/24') router_id2 = (vcns_const.DHCP_EDGE_PREFIX + net2['network']['id'])[:36] edge2 = nsxv_db.get_nsxv_router_binding( self.context.session, router_id2)['edge_id'] # make sure we have 2 separate dhcp edges self.assertNotEqual(edge1, edge2) def test_create_subnets_with_different_tenants_shared(self): cfg.CONF.set_override('share_edges_between_tenants', True, group="nsxv") self.mock_create_dhcp_service.stop() # create 2 networks with different tenants with self.network(name='net1', tenant_id='fake1') as net1,\ self.network(name='net2', tenant_id='fake2') as net2: # create 2 non-overlapping subnets self._test_create_subnet(network=net1, cidr='10.0.0.0/24') router_id1 = (vcns_const.DHCP_EDGE_PREFIX + net1['network']['id'])[:36] edge1 = 
nsxv_db.get_nsxv_router_binding( self.context.session, router_id1)['edge_id'] self._test_create_subnet(network=net2, cidr='20.0.0.0/24') router_id2 = (vcns_const.DHCP_EDGE_PREFIX + net2['network']['id'])[:36] edge2 = nsxv_db.get_nsxv_router_binding( self.context.session, router_id2)['edge_id'] # make sure we have both networks on the same dhcp edges self.assertEqual(edge1, edge2) def test_create_subnet_ipv6_slaac_with_db_reference_error(self): self.skipTest('Currently not supported') def test_create_subnet_ipv6_slaac_with_port_not_found(self): self.skipTest('Currently not supported') def test_create_subnet_ipv6_gw_values(self): # This test should fail with response code 400 as IPv6 subnets with # DHCP are not supported by this plugin with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: super(TestSubnetsV2, self).test_create_subnet_ipv6_gw_values() self.assertEqual(ctx_manager.exception.code, 400) def test_create_subnet_only_ip_version_v6_old(self): self.skipTest('Currently not supported') def test_create_subnet_reserved_network(self): self.mock_create_dhcp_service.stop() name = 'overlap-reserved-net' providernet_args = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'dvs-uuid'} with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: with self.network(name=name, do_delete=False, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) as net: self._test_create_subnet(network=net, cidr='169.254.128.128/25') self.assertEqual(ctx_manager.exception.code, 400) class TestSubnetPoolsV2(NsxVPluginV2TestCase, test_plugin.TestSubnetsV2): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): super(TestSubnetPoolsV2, self).setUp() self.context = context.get_admin_context() def test_subnet_update_ipv4_and_ipv6_pd_slaac_subnets(self): self.skipTest('No DHCP v6 Support yet') def test_subnet_update_ipv4_and_ipv6_pd_v6stateless_subnets(self): self.skipTest('No 
DHCP v6 Support yet') def test_create_subnet_ipv6_gw_is_nw_end_addr_returns_201(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_out_of_cidr_global(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_V6_pd_stateless(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_V6_pd_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_ip_already_allocated(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_dhcpv6_stateless_with_ip_already_allocated(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_dhcpv6_stateless_with_port_on_network(self): self.skipTest('Not supported') def test_create_subnet_ipv6_gw_values(self): self.skipTest('Not supported') def test_create_subnet_ipv6_out_of_cidr_lla(self): self.skipTest('Not supported') def test_create_subnet_ipv6_pd_gw_values(self): self.skipTest('Not supported') def test_create_subnet_ipv6_slaac_with_db_reference_error(self): self.skipTest('Not supported') def test_create_subnet_ipv6_slaac_with_port_not_found(self): self.skipTest('Not supported') def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self): self.skipTest('Not supported') def test_create_subnet_ipv6_slaac_with_port_on_network(self): self.skipTest('Not supported') def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self): self.skipTest('Not supported') def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self): self.skipTest('Not supported') def test_create_subnet_only_ip_version_v6(self): self.skipTest('Not supported') def test_create_subnet_with_v6_allocation_pool(self): self.skipTest('Not supported') def test_create_subnet_with_v6_pd_allocation_pool(self): self.skipTest('Not supported') def test_delete_subnet_ipv6_slaac_port_exists(self): self.skipTest('Not supported') def test_delete_subnet_ipv6_slaac_router_port_exists(self): self.skipTest('Not supported') def 
test_update_subnet_inconsistent_ipv6_gatewayv4(self): self.skipTest('Not supported') def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): self.skipTest('Not supported') def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): self.skipTest('Not supported') def test_update_subnet_ipv6_address_mode_fails(self): self.skipTest('Not supported') def test_update_subnet_ipv6_attributes_fails(self): self.skipTest('Not supported') def test_update_subnet_ipv6_cannot_disable_dhcp(self): self.skipTest('Not supported') def test_update_subnet_ipv6_ra_mode_fails(self): self.skipTest('Not supported') def test_create_subnet_only_ip_version_v6_old(self): self.skipTest('Currently not supported') class TestBasicGet(test_plugin.TestBasicGet, NsxVPluginV2TestCase): pass class TestV2HTTPResponse(test_plugin.TestV2HTTPResponse, NsxVPluginV2TestCase): pass class TestL3ExtensionManager(object): def get_resources(self): # Simulate extension of L3 attribute map l3.L3().update_attributes_map( l3_egm_apidef.RESOURCE_ATTRIBUTE_MAP) l3.L3().update_attributes_map( dvr_apidef.RESOURCE_ATTRIBUTE_MAP) l3.L3().update_attributes_map( router_type.EXTENDED_ATTRIBUTES_2_0) l3.L3().update_attributes_map( router_size.EXTENDED_ATTRIBUTES_2_0) l3.L3().update_attributes_map( raz_apidef.RESOURCE_ATTRIBUTE_MAP) l3.L3().update_attributes_map( l3fav_apidef.RESOURCE_ATTRIBUTE_MAP) return (l3.L3.get_resources() + address_scope.Address_scope.get_resources()) def get_actions(self): return [] def get_request_extensions(self): return [] class L3NatTest(test_l3_plugin.L3BaseForIntTests, NsxVPluginV2TestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): cfg.CONF.set_override('task_status_check_interval', 200, group="nsxv") cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) ext_mgr = ext_mgr or TestL3ExtensionManager() super(L3NatTest, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.plugin_instance = directory.get_plugin() 
self._plugin_name = "%s.%s" % ( self.plugin_instance.__module__, self.plugin_instance.__class__.__name__) self._plugin_class = self.plugin_instance.__class__ def tearDown(self): plugin = directory.get_plugin() _manager = plugin.nsx_v.task_manager # wait max ~10 seconds for all tasks to be finished for i in range(100): if not _manager.has_pending_task(): break greenthread.sleep(0.1) if _manager.has_pending_task(): _manager.show_pending_tasks() raise Exception(_("Tasks not completed")) _manager.stop() # Ensure the manager thread has been stopped self.assertIsNone(_manager._thread) super(L3NatTest, self).tearDown() def _create_l3_ext_network(self, vlan_id=None): name = 'l3_ext_net' return self.network(name=name, router__external=True) def _create_router(self, fmt, tenant_id, name=None, admin_state_up=None, set_context=False, arg_list=None, **kwargs): tenant_id = tenant_id or _uuid() data = {'router': {'tenant_id': tenant_id}} if name: data['router']['name'] = name if admin_state_up: data['router']['admin_state_up'] = admin_state_up for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): # Arg must be present and not empty if kwargs.get(arg): data['router'][arg] = kwargs[arg] router_req = self.new_create_request('routers', data, fmt) if set_context and tenant_id: # create a specific auth context for this request router_req.environ['neutron.context'] = context.Context( '', tenant_id) return router_req.get_response(self.ext_api) def _make_router(self, fmt, tenant_id, name=None, admin_state_up=None, external_gateway_info=None, set_context=False, arg_list=None, **kwargs): if external_gateway_info: arg_list = ('external_gateway_info', ) + (arg_list or ()) res = self._create_router(fmt, tenant_id, name, admin_state_up, set_context, arg_list=arg_list, external_gateway_info=external_gateway_info, **kwargs) return self.deserialize(fmt, res) @contextlib.contextmanager def router(self, name=None, admin_state_up=True, fmt=None, tenant_id=None, external_gateway_info=None, 
set_context=False, **kwargs): # avoid name duplication of edge if not name: name = _uuid() router = self._make_router(fmt or self.fmt, tenant_id, name, admin_state_up, external_gateway_info, set_context, **kwargs) yield router def _recursive_sort_list(self, lst): sorted_list = [] for ele in lst: if isinstance(ele, list): sorted_list.append(self._recursive_sort_list(ele)) elif isinstance(ele, dict): sorted_list.append(self._recursive_sort_dict(ele)) else: sorted_list.append(ele) return sorted(sorted_list, key=helpers.safe_sort_key) def _recursive_sort_dict(self, dct): sorted_dict = {} for k, v in dct.items(): if isinstance(v, list): sorted_dict[k] = self._recursive_sort_list(v) elif isinstance(v, dict): sorted_dict[k] = self._recursive_sort_dict(v) else: sorted_dict[k] = v return sorted_dict def _update_router_enable_snat(self, router_id, network_id, enable_snat): return self._update('routers', router_id, {'router': {'external_gateway_info': {'network_id': network_id, 'enable_snat': enable_snat}}}) def test_floatingip_association_on_unowned_router(self): self.skipTest("Currently no support in plugin for this") def test_router_add_gateway_no_subnet(self): self.skipTest('No support for no subnet gateway set') def test_floatingip_create_different_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_router_add_interface_multiple_ipv4_subnet_port_returns_400(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_router_add_interface_multiple_ipv6_subnet_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_floatingip_update_different_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_create_multiple_floatingips_same_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def _set_net_external(self, net_id): self._update('networks', net_id, {'network': {extnet_apidef.EXTERNAL: True}}) def 
_add_external_gateway_to_router(self, router_id, network_id, expected_code=webob.exc.HTTPOk.code, neutron_context=None, ext_ips=None): ext_ips = ext_ips or [] body = {'router': {'external_gateway_info': {'network_id': network_id}}} if ext_ips: body['router']['external_gateway_info'][ 'external_fixed_ips'] = ext_ips return self._update('routers', router_id, body, expected_code=expected_code, neutron_context=neutron_context) def test_router_add_gateway_no_subnet_forbidden(self): with self.router() as r: with self.network() as n: self._set_net_external(n['network']['id']) self._add_external_gateway_to_router( r['router']['id'], n['network']['id'], expected_code=webob.exc.HTTPBadRequest.code) class L3NatTestCaseBase(test_l3_plugin.L3NatTestCaseMixin): def test_create_floatingip_with_specific_ip(self): with self.subnet(cidr='10.0.0.0/24', enable_dhcp=False) as s: network_id = s['subnet']['network_id'] self._set_net_external(network_id) fp = self._make_floatingip(self.fmt, network_id, floating_ip='10.0.0.10') self.assertEqual('10.0.0.10', fp['floatingip']['floating_ip_address']) def test_floatingip_same_external_and_internal(self): # Select router with subnet's gateway_ip for floatingip when # routers connected to same subnet and external network. 
with self.subnet(cidr="10.0.0.0/24", enable_dhcp=False) as exs,\ self.subnet(cidr="12.0.0.0/24", gateway_ip="12.0.0.50", enable_dhcp=False) as ins: network_ex_id = exs['subnet']['network_id'] self._set_net_external(network_ex_id) r2i_fixed_ips = [{'ip_address': '12.0.0.2'}] with self.router() as r1,\ self.router() as r2,\ self.port(subnet=ins, fixed_ips=r2i_fixed_ips) as r2i_port: self._add_external_gateway_to_router( r1['router']['id'], network_ex_id) self._router_interface_action('add', r2['router']['id'], None, r2i_port['port']['id']) self._router_interface_action('add', r1['router']['id'], ins['subnet']['id'], None) self._add_external_gateway_to_router( r2['router']['id'], network_ex_id) with self.port(subnet=ins, fixed_ips=[{'ip_address': '12.0.0.8'}] ) as private_port: fp = self._make_floatingip(self.fmt, network_ex_id, private_port['port']['id']) self.assertEqual(r1['router']['id'], fp['floatingip']['router_id']) def test_floatingip_multi_external_one_internal(self): with self.subnet(cidr="10.0.0.0/24", enable_dhcp=False) as exs1,\ self.subnet(cidr="11.0.0.0/24", enable_dhcp=False) as exs2,\ self.subnet(cidr="12.0.0.0/24", enable_dhcp=False) as ins1: network_ex_id1 = exs1['subnet']['network_id'] network_ex_id2 = exs2['subnet']['network_id'] self._set_net_external(network_ex_id1) self._set_net_external(network_ex_id2) r2i_fixed_ips = [{'ip_address': '12.0.0.2'}] with self.router() as r1,\ self.router() as r2,\ self.port(subnet=ins1, fixed_ips=r2i_fixed_ips) as r2i_port: self._add_external_gateway_to_router( r1['router']['id'], network_ex_id1) self._router_interface_action('add', r1['router']['id'], ins1['subnet']['id'], None) self._add_external_gateway_to_router( r2['router']['id'], network_ex_id2) self._router_interface_action('add', r2['router']['id'], None, r2i_port['port']['id']) with self.port(subnet=ins1, fixed_ips=[{'ip_address': '12.0.0.3'}] ) as private_port: fp1 = self._make_floatingip(self.fmt, network_ex_id1, private_port['port']['id']) fp2 = 
self._make_floatingip(self.fmt, network_ex_id2, private_port['port']['id']) self.assertEqual(fp1['floatingip']['router_id'], r1['router']['id']) self.assertEqual(fp2['floatingip']['router_id'], r2['router']['id']) @mock.patch.object(edge_utils, "update_firewall") def test_router_set_gateway_with_nosnat(self, mock): expected_fw = [{'action': 'allow', 'enabled': True, 'name': 'Subnet Rule', 'source_ip_address': [], 'destination_ip_address': []}] nosnat_fw = [{'action': 'allow', 'enabled': True, 'name': 'No SNAT Rule', 'source_vnic_groups': ["external"], 'destination_ip_address': []}] with self.router() as r1,\ self.subnet() as ext_subnet,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2: self._set_net_external(ext_subnet['subnet']['network_id']) self._router_interface_action( 'add', r1['router']['id'], s1['subnet']['id'], None) expected_fw[0]['source_ip_address'] = ['11.0.0.0/24'] expected_fw[0]['destination_ip_address'] = ['11.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual(self._recursive_sort_list(expected_fw), self._recursive_sort_list(fw_rules)) self._add_external_gateway_to_router( r1['router']['id'], ext_subnet['subnet']['network_id']) fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual(self._recursive_sort_list(expected_fw), self._recursive_sort_list(fw_rules)) self._update_router_enable_snat( r1['router']['id'], ext_subnet['subnet']['network_id'], False) nosnat_fw[0]['destination_ip_address'] = ['11.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw + nosnat_fw), self._recursive_sort_list(fw_rules)) self._router_interface_action('add', r1['router']['id'], s2['subnet']['id'], None) expected_fw[0]['source_ip_address'] = ['12.0.0.0/24', '11.0.0.0/24'] expected_fw[0]['destination_ip_address'] = ['12.0.0.0/24', '11.0.0.0/24'] nosnat_fw[0]['destination_ip_address'] = ['11.0.0.0/24', '12.0.0.0/24'] fw_rules = 
mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw + nosnat_fw), self._recursive_sort_list(fw_rules)) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) expected_fw[0]['source_ip_address'] = ['12.0.0.0/24'] expected_fw[0]['destination_ip_address'] = ['12.0.0.0/24'] nosnat_fw[0]['destination_ip_address'] = ['12.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw + nosnat_fw), self._recursive_sort_list(fw_rules)) self._update_router_enable_snat( r1['router']['id'], ext_subnet['subnet']['network_id'], True) fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw), self._recursive_sort_list(fw_rules)) self._router_interface_action('remove', r1['router']['id'], s2['subnet']['id'], None) self._remove_external_gateway_from_router( r1['router']['id'], ext_subnet['subnet']['network_id']) def test_router_add_interface_port_bad_tenant_returns_404(self): self.skipTest('TBD') def test_router_add_interface_subnet_with_bad_tenant_returns_404(self): self.skipTest('TBD') def test_router_add_interface_multiple_ipv6_subnets_same_net(self): """Test router-interface-add for multiple ipv6 subnets on a network. Verify that adding multiple ipv6 subnets from the same network to a router places them all on the same router interface. 
""" with self.router() as r, self.network() as n: with (self.subnet(network=n, cidr='fd00::1/64', enable_dhcp=False, ip_version=6) ) as s1, self.subnet(network=n, cidr='fd01::1/64', ip_version=6, enable_dhcp=False) as s2: body = self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) pid1 = body['port_id'] body = self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) pid2 = body['port_id'] self.assertEqual(pid1, pid2) port = self._show('ports', pid1) self.assertEqual(2, len(port['port']['fixed_ips'])) port_subnet_ids = [fip['subnet_id'] for fip in port['port']['fixed_ips']] self.assertIn(s1['subnet']['id'], port_subnet_ids) self.assertIn(s2['subnet']['id'], port_subnet_ids) self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) def test_router_add_interface_ipv6_port_existing_network_returns_400(self): """Ensure unique IPv6 router ports per network id. Adding a router port containing one or more IPv6 subnets with the same network id as an existing router port should fail. This is so there is no ambiguity regarding on which port to add an IPv6 subnet when executing router-interface-add with a subnet and no port. 
""" with self.network() as n, self.router() as r: with self.subnet(network=n, cidr='fd00::/64', ip_version=6, enable_dhcp=False) as s1, ( self.subnet(network=n, cidr='fd01::/64', ip_version=6, enable_dhcp=False)) as s2: with self.port(subnet=s1) as p: self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) exp_code = webob.exc.HTTPBadRequest.code self._router_interface_action('add', r['router']['id'], None, p['port']['id'], expected_code=exp_code) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) def test_subnet_dhcp_metadata_with_update(self): self.plugin_instance.metadata_proxy_handler = mock.Mock() with self.subnet(cidr="10.0.0.0/24", enable_dhcp=True) as s1: subnet_id = s1['subnet']['id'] is_dhcp_meta = self.plugin_instance.is_dhcp_metadata( context.get_admin_context(), subnet_id) self.assertTrue(is_dhcp_meta) port_data = {'port': {'tenant_id': s1['subnet']['tenant_id'], 'network_id': s1['subnet']['network_id'], 'device_owner': 'compute:None'}} req = self.new_create_request( 'ports', port_data).get_response(self.api) port_req = self.deserialize(self.fmt, req) subnet_data = {'subnet': {'enable_dhcp': False}} self.new_update_request( 'subnets', subnet_data, s1['subnet']['id']).get_response(self.api) is_dhcp_meta = self.plugin_instance.is_dhcp_metadata( context.get_admin_context(), subnet_id) self.assertFalse(is_dhcp_meta) self.new_delete_request('ports', port_req['port']['id']) def test_router_delete_ipv6_slaac_subnet_inuse_returns_409(self): self.skipTest('No DHCP v6 Support yet') def test_router_delete_dhcpv6_stateless_subnet_inuse_returns_409(self): self.skipTest('No DHCP v6 Support yet') def test_router_add_iface_ipv6_ext_ra_subnet_returns_400(self): self.skipTest('No DHCP v6 Support yet') def test_router_remove_ipv6_subnet_from_interface(self): self.skipTest('No DHCP v6 Support yet') def test_router_update_gateway_add_multiple_prefixes_ipv6(self): self.skipTest('No DHCP v6 Support yet') def 
test_router_concurrent_delete_upon_subnet_create(self): self.skipTest('No DHCP v6 Support yet') def test_router_update_gateway_upon_subnet_create_ipv6(self): self.skipTest('No DHCP v6 Support yet') def test_router_update_gateway_upon_subnet_create_max_ips_ipv6(self): self.skipTest('No DHCP v6 Support yet') def test_floatingip_via_router_interface_returns_201(self): self.skipTest('not supported') def test_floatingip_via_router_interface_returns_404(self): self.skipTest('not supported') def test_floatingip_update_subnet_gateway_disabled(self): self.skipTest('not supported') class IPv6ExpectedFailuresTestMixin(object): def test_router_add_interface_ipv6_subnet(self): self.skipTest('Not supported') def test_router_add_iface_ipv6_ext_ra_subnet_returns_400(self): # This returns a 400 too, but as an exception is raised the response # code need to be asserted differently with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: super(IPv6ExpectedFailuresTestMixin, self).\ test_router_add_iface_ipv6_ext_ra_subnet_returns_400() self.assertEqual(ctx_manager.exception.code, 400) def test_router_add_gateway_multiple_subnets_ipv6(self): self.skipTest('not supported') class TestExclusiveRouterTestCase(L3NatTest, L3NatTestCaseBase, test_l3_plugin.L3NatDBIntTestCase, IPv6ExpectedFailuresTestMixin, NsxVPluginV2TestCase, test_address_scope.AddressScopeTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): super(TestExclusiveRouterTestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.plugin_instance.nsx_v.is_subnet_in_use = mock.Mock() self.plugin_instance.nsx_v.is_subnet_in_use.return_value = False self._default_tenant_id = self._tenant_id self._router_tenant_id = 'test-router-tenant' def _create_router(self, fmt, tenant_id, name=None, admin_state_up=None, set_context=False, arg_list=None, **kwargs): tenant_id = tenant_id or _uuid() data = {'router': {'tenant_id': tenant_id}} if name: 
data['router']['name'] = name if admin_state_up: data['router']['admin_state_up'] = admin_state_up for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): # Arg must be present and not empty if arg in kwargs and kwargs[arg]: data['router'][arg] = kwargs[arg] data['router']['router_type'] = kwargs.get('router_type', 'exclusive') router_req = self.new_create_request('routers', data, fmt) if set_context and tenant_id: # create a specific auth context for this request router_req.environ['neutron.context'] = context.Context( '', tenant_id) return router_req.get_response(self.ext_api) def _test_create_l3_ext_network(self, vlan_id=0): name = 'l3_ext_net' expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (extnet_apidef.EXTERNAL, True)] with self._create_l3_ext_network(vlan_id) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_router_fail_at_the_backend(self): p = directory.get_plugin() edge_manager = p.edge_manager with mock.patch.object(edge_manager, 'create_lrouter', side_effect=[n_exc.NeutronException]): router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 'fake_tenant', 'router_type': 'exclusive'}} self.assertRaises(n_exc.NeutronException, p.create_router, context.get_admin_context(), router) self._test_list_resources('router', ()) def test_create_l3_ext_network_with_dhcp(self): with self._create_l3_ext_network() as net: with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: with self.subnet(network=net, enable_dhcp=True): self.assertEqual(ctx_manager.exception.code, 400) def test_create_l3_ext_network_without_vlan(self): self._test_create_l3_ext_network() def _test_router_create_with_gwinfo_and_l3_ext_net(self, vlan_id=None, validate_ext_gw=False, router_ctx=None): tenant_id = self._router_tenant_id if router_ctx else self._tenant_id with self._create_l3_ext_network(vlan_id) as net: with 
self.subnet(network=net, enable_dhcp=False) as s: data = {'router': {'tenant_id': tenant_id}} data['router']['name'] = 'router1' data['router']['external_gateway_info'] = { 'network_id': s['subnet']['network_id']} router_req = self.new_create_request( 'routers', data, self.fmt, context=router_ctx) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) self.assertEqual( s['subnet']['network_id'], (router['router']['external_gateway_info'] ['network_id'])) if validate_ext_gw: pass def test_router_create_with_gwinfo_and_l3_ext_net(self): self._test_router_create_with_gwinfo_and_l3_ext_net() def test_router_create_with_gwinfo_and_l3_ext_net_with_vlan(self): self._test_router_create_with_gwinfo_and_l3_ext_net(444) def test_router_create_with_gwinfo_and_l3_ext_net_with_non_admin(self): ctx = context.Context(user_id=None, tenant_id=self._router_tenant_id, is_admin=False) self._test_router_create_with_gwinfo_and_l3_ext_net(router_ctx=ctx) def test_router_create_with_different_sizes(self): data = {'router': { 'tenant_id': 'whatever', 'name': 'test_router', 'router_type': 'exclusive'}} for size in ['compact', 'large', 'xlarge', 'quadlarge']: data['router']['router_size'] = size router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) self.assertEqual(size, router['router']['router_size']) def test_router_create_overriding_default_edge_size(self): data = {'router': { 'tenant_id': 'whatever', 'name': 'test_router', 'router_type': 'exclusive'}} cfg.CONF.set_override('exclusive_router_appliance_size', 'xlarge', group='nsxv') router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) self.assertEqual('xlarge', router['router']['router_size']) def test_router_add_gateway_invalid_network_returns_404(self): # NOTE(salv-orlando): This unit test has been overridden # as the 
nsx plugin support the ext_gw_mode extension # which mandates an uuid for the external network identifier with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], uuidutils.generate_uuid(), expected_code=webob.exc.HTTPNotFound.code) def test_router_rename(self): with self.router(name='old_name') as r: with mock.patch.object(edge_appliance_driver.EdgeApplianceDriver, 'rename_edge') as edge_rename: new_name = 'new_name' router_id = r['router']['id'] # get the edge of this router plugin = directory.get_plugin() router_obj = ex_router_driver.RouterExclusiveDriver(plugin) ctx = context.get_admin_context() edge_id = router_obj._get_edge_id_or_raise(ctx, router_id) # update the name body = self._update('routers', router_id, {'router': {'name': new_name}}) self.assertEqual(new_name, body['router']['name']) edge_rename.assert_called_once_with( edge_id, new_name + '-' + router_id) def test_router_resize(self): with self.router() as r: with mock.patch.object(edge_appliance_driver.EdgeApplianceDriver, 'resize_edge') as edge_resize: new_size = 'large' router_id = r['router']['id'] # get the edge of this router plugin = directory.get_plugin() router_obj = ex_router_driver.RouterExclusiveDriver(plugin) ctx = context.get_admin_context() edge_id = router_obj._get_edge_id_or_raise(ctx, router_id) # update the router size body = self._update('routers', router_id, {'router': {'router_size': new_size}}) self.assertEqual(new_size, body['router']['router_size']) edge_resize.assert_called_once_with(edge_id, new_size) def _test_router_update_gateway_on_l3_ext_net(self, vlan_id=None, validate_ext_gw=False, distributed=False, router_ctx=None): if router_ctx: self._tenant_id = self._router_tenant_id with self.router( arg_list=('distributed',), distributed=distributed, set_context=True, tenant_id=self._tenant_id) as r: self._tenant_id = self._default_tenant_id with self.subnet() as s1: with self._create_l3_ext_network(vlan_id) as net: with self.subnet(network=net, 
enable_dhcp=False) as s2: self._set_net_external(s1['subnet']['network_id']) try: self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id'], neutron_context=router_ctx) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s1['subnet']['network_id']) # Plug network with external mapping self._set_net_external(s2['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id'], neutron_context=router_ctx) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s2['subnet']['network_id']) if validate_ext_gw: pass finally: # Cleanup self._remove_external_gateway_from_router( r['router']['id'], s2['subnet']['network_id']) def test_router_update_gateway_on_l3_ext_net(self): self._test_router_update_gateway_on_l3_ext_net() def test_router_update_gateway_on_l3_ext_net_with_non_admin(self): ctx = context.Context(user_id=None, tenant_id=self._router_tenant_id, is_admin=False) self._test_router_update_gateway_on_l3_ext_net(router_ctx=ctx) def test_router_update_gateway_on_l3_ext_net_with_vlan(self): self._test_router_update_gateway_on_l3_ext_net(444) def test_router_update_gateway_with_existing_floatingip(self): with self._create_l3_ext_network() as net: with self.subnet(network=net, enable_dhcp=False) as subnet: with self.floatingip_with_assoc() as fip: self._add_external_gateway_to_router( fip['floatingip']['router_id'], subnet['subnet']['network_id'], expected_code=webob.exc.HTTPConflict.code) def test_router_list_by_tenant_id(self): with self.router(), self.router(): with self.router(tenant_id='custom') as router: self._test_list_resources('router', [router], query_params="tenant_id=custom") def test_create_l3_ext_network_with_vlan(self): self._test_create_l3_ext_network(666) def test_floatingip_with_assoc_fails(self): 
self._test_floatingip_with_assoc_fails( self._plugin_name + '._check_and_get_fip_assoc') def test_floatingip_with_invalid_create_port(self): self._test_floatingip_with_invalid_create_port(self._plugin_name) def test_floatingip_update(self): super(TestExclusiveRouterTestCase, self).test_floatingip_update( constants.FLOATINGIP_STATUS_DOWN) def test_floating_ip_no_snat(self): """Cannot add floating ips to a router with disabled snat""" with self.router() as r1,\ self.subnet() as ext_subnet,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.port(subnet=s1) as private_port: # Add interfaces to the router self._router_interface_action( 'add', r1['router']['id'], s1['subnet']['id'], None) self._set_net_external(ext_subnet['subnet']['network_id']) self._add_external_gateway_to_router( r1['router']['id'], ext_subnet['subnet']['network_id']) # disable snat self._update_router_enable_snat( r1['router']['id'], ext_subnet['subnet']['network_id'], False) # create a floating ip and associate it to the router should fail self.assertRaises( object, self._make_floatingip, self.fmt, ext_subnet['subnet']['network_id'], private_port['port']['id']) # now enable snat and try again self._update_router_enable_snat( r1['router']['id'], ext_subnet['subnet']['network_id'], True) self._make_floatingip( self.fmt, ext_subnet['subnet']['network_id'], private_port['port']['id']) # now shouldn't be able to disable snat self.assertRaises( object, self._update_router_enable_snat, r1['router']['id'], ext_subnet['subnet']['network_id'], False) def test_floatingip_disassociate(self): with self.port() as p: private_sub = {'subnet': {'id': p['port']['fixed_ips'][0]['subnet_id']}} with self.floatingip_no_assoc(private_sub) as fip: self.assertEqual(fip['floatingip']['status'], constants.FLOATINGIP_STATUS_DOWN) port_id = p['port']['id'] body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': port_id}}) self.assertEqual(body['floatingip']['port_id'], port_id) 
self.assertEqual(body['floatingip']['status'], constants.FLOATINGIP_STATUS_ACTIVE) # Disassociate body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': None}}) body = self._show('floatingips', fip['floatingip']['id']) self.assertIsNone(body['floatingip']['port_id']) self.assertIsNone(body['floatingip']['fixed_ip_address']) self.assertEqual(body['floatingip']['status'], constants.FLOATINGIP_STATUS_DOWN) def test_update_floatingip_with_edge_router_update_failure(self): p = directory.get_plugin() with self.subnet() as subnet,\ self.port(subnet=subnet) as p1,\ self.port(subnet=subnet) as p2: p1_id = p1['port']['id'] p2_id = p2['port']['id'] with self.floatingip_with_assoc(port_id=p1_id) as fip: with self._mock_edge_router_update_with_exception(): self.assertRaises(object, p.update_floatingip, context.get_admin_context(), fip['floatingip']['id'], floatingip={'floatingip': {'port_id': p2_id}}) res = self._list( 'floatingips', query_params="port_id=%s" % p1_id) self.assertEqual(len(res['floatingips']), 1) res = self._list( 'floatingips', query_params="port_id=%s" % p2_id) self.assertEqual(len(res['floatingips']), 0) def test_create_floatingip_with_edge_router_update_failure(self): p = directory.get_plugin() with self.subnet(cidr='200.0.0.0/24') as public_sub: public_network_id = public_sub['subnet']['network_id'] self._set_net_external(public_network_id) with self.port() as private_port: port_id = private_port['port']['id'] tenant_id = private_port['port']['tenant_id'] subnet_id = private_port['port']['fixed_ips'][0]['subnet_id'] with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self._router_interface_action('add', r['router']['id'], subnet_id, None) floatingip = {'floatingip': { 'tenant_id': tenant_id, 'floating_network_id': public_network_id, 'port_id': port_id}} with self._mock_edge_router_update_with_exception(): self.assertRaises(object, p.create_floatingip, 
context.get_admin_context(), floatingip=floatingip) res = self._list( 'floatingips', query_params="port_id=%s" % port_id) self.assertEqual(len(res['floatingips']), 0) # Cleanup self._router_interface_action('remove', r['router']['id'], subnet_id, None) self._remove_external_gateway_from_router( r['router']['id'], public_network_id) @contextlib.contextmanager def _mock_edge_router_update_with_exception(self): nsx_router_update = PLUGIN_NAME + '._update_edge_router' with mock.patch(nsx_router_update) as update_edge: update_edge.side_effect = object() yield update_edge @mock.patch.object(edge_utils, "update_firewall") def test_router_interfaces_with_update_firewall(self, mock): s1_cidr = '10.0.0.0/24' s2_cidr = '11.0.0.0/24' with self.router() as r,\ self.subnet(cidr=s1_cidr) as s1,\ self.subnet(cidr=s2_cidr) as s2: self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) expected_cidrs = [s1_cidr, s2_cidr] expected_fw = [{'action': 'allow', 'enabled': True, 'name': 'Subnet Rule', 'source_ip_address': expected_cidrs, 'destination_ip_address': expected_cidrs}] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual(self._recursive_sort_list(expected_fw), self._recursive_sort_list(fw_rules)) self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) @mock.patch.object(edge_utils, "update_firewall") def test_router_interfaces_with_update_firewall_metadata(self, mock): self.plugin_instance.metadata_proxy_handler = mock.Mock() s1_cidr = '10.0.0.0/24' s2_cidr = '11.0.0.0/24' with self.router() as r,\ self.subnet(cidr=s1_cidr) as s1,\ self.subnet(cidr=s2_cidr) as s2: self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) # build the list 
of expected fw rules expected_cidrs = [s1_cidr, s2_cidr] fw_rule = {'action': 'allow', 'enabled': True, 'name': 'Subnet Rule', 'source_ip_address': expected_cidrs, 'destination_ip_address': expected_cidrs} vse_rule = {'action': 'allow', 'enabled': True, 'name': 'VSERule', 'source_vnic_groups': ['vse']} dest_intern = [md_proxy.INTERNAL_SUBNET] md_inter = {'action': 'deny', 'destination_ip_address': dest_intern, 'enabled': True, 'name': 'MDInterEdgeNet'} dest_srvip = [md_proxy.METADATA_IP_ADDR] md_srvip = {'action': 'allow', 'destination_ip_address': dest_srvip, 'destination_port': '80,443,8775', 'enabled': True, 'name': 'MDServiceIP', 'protocol': 'tcp'} expected_fw = [fw_rule, vse_rule, md_inter, md_srvip] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual(self._recursive_sort_list(expected_fw), self._recursive_sort_list(fw_rules)) # Also test the md_srvip conversion: drv = edge_firewall_driver.EdgeFirewallDriver() rule = drv._convert_firewall_rule(md_srvip) exp_service = {'service': [{'port': [80, 443, 8775], 'protocol': 'tcp'}]} exp_rule = {'action': 'accept', 'application': exp_service, 'destination': {'ipAddress': dest_srvip}, 'enabled': True, 'name': 'MDServiceIP'} self.assertEqual(exp_rule, rule) self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) @mock.patch.object(edge_utils, "update_firewall") def test_router_interfaces_with_update_firewall_metadata_conf(self, mock): """Test the metadata proxy firewall rule with configured ports """ cfg.CONF.set_override('metadata_service_allowed_ports', ['55', ' 66 ', '55', '77'], group='nsxv') self.plugin_instance.metadata_proxy_handler = mock.Mock() s1_cidr = '10.0.0.0/24' with self.router() as r,\ self.subnet(cidr=s1_cidr) as s1: self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) # build the expected fw rule # at this stage the string of ports is not 
sorted/unique/validated dest_srvip = [md_proxy.METADATA_IP_ADDR] rule_name = 'MDServiceIP' md_srvip = {'action': 'allow', 'destination_ip_address': dest_srvip, 'destination_port': '80,443,8775,55,66,55,77', 'enabled': True, 'name': rule_name, 'protocol': 'tcp'} # compare it to the rule with the same name fw_rules = mock.call_args[0][3]['firewall_rule_list'] rule_found = False for fw_rule in fw_rules: if (validators.is_attr_set(fw_rule.get("name")) and fw_rule['name'] == rule_name): self.assertEqual(md_srvip, fw_rule) rule_found = True break self.assertTrue(rule_found) # Also test the rule conversion # Ports should be sorted & unique, and ignore non numeric values drv = edge_firewall_driver.EdgeFirewallDriver() rule = drv._convert_firewall_rule(md_srvip) exp_service = {'service': [{'port': [55, 66, 77, 80, 443, 8775], 'protocol': 'tcp'}]} exp_rule = {'action': 'accept', 'application': exp_service, 'destination': {'ipAddress': dest_srvip}, 'enabled': True, 'name': 'MDServiceIP'} self.assertEqual(exp_rule, rule) @mock.patch.object(edge_utils, "update_firewall") def test_router_interfaces_different_tenants_update_firewall(self, mock): tenant_id = _uuid() other_tenant_id = _uuid() s1_cidr = '10.0.0.0/24' s2_cidr = '11.0.0.0/24' with self.router(tenant_id=tenant_id) as r,\ self.network(tenant_id=tenant_id) as n1,\ self.network(tenant_id=other_tenant_id) as n2,\ self.subnet(network=n1, cidr=s1_cidr) as s1,\ self.subnet(network=n2, cidr=s2_cidr) as s2: self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None, tenant_id=tenant_id) expected_cidrs = [s1_cidr, s2_cidr] expected_fw = [{'action': 'allow', 'enabled': True, 'name': 'Subnet Rule', 'source_ip_address': expected_cidrs, 'destination_ip_address': expected_cidrs}] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual(self._recursive_sort_list(expected_fw), self._recursive_sort_list(fw_rules)) 
self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None, tenant_id=tenant_id) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) expected_fw = [] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual(expected_fw, fw_rules) def test_create_router_gateway_fails(self): self.skipTest('not supported') def test_migrate_exclusive_router_to_shared(self): with self._create_l3_ext_network() as net: with self.subnet(network=net, enable_dhcp=False) as s: data = {'router': {'tenant_id': 'whatever'}} data['router']['name'] = 'router1' data['router']['external_gateway_info'] = { 'network_id': s['subnet']['network_id']} data['router']['router_type'] = 'exclusive' router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) # update the router type: router_id = router['router']['id'] self._update('routers', router_id, {'router': {'router_type': 'shared'}}) # get the updated router and check it's type body = self._show('routers', router_id) self.assertEqual('shared', body['router']['router_type']) @mock.patch.object(edge_utils.EdgeManager, 'update_interface_addr') def test_router_update_gateway_with_different_external_subnet(self, mock): # This test calls the backend, so we need a mock for the edge_utils super( TestExclusiveRouterTestCase, self).test_router_update_gateway_with_different_external_subnet() @mock.patch.object(edge_utils.EdgeManager, 'update_interface_addr') def test_router_add_interface_multiple_ipv6_subnets_same_net(self, mock): # This test calls the backend, so we need a mock for the edge_utils super( TestExclusiveRouterTestCase, self).test_router_add_interface_multiple_ipv6_subnets_same_net() def _fake_rename_edge(self, edge_id, name): raise vcns_exc.VcnsApiException( status=400, header={'status': 200}, uri='fake_url', response='') def test_create_router_with_update_error(self): p = 
directory.get_plugin() # make sure there is an available edge so we will use backend update available_edge = {'edge_id': 'edge-11', 'router_id': 'fake_id'} nsxv_db.add_nsxv_router_binding( context.get_admin_context().session, available_edge['router_id'], available_edge['edge_id'], None, constants.ACTIVE) with mock.patch.object(p.edge_manager, '_get_available_router_binding', return_value=available_edge): # Mock for update_edge task failure with mock.patch.object( p.edge_manager.nsxv_manager, 'rename_edge', side_effect=self._fake_rename_edge): router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 'fake_tenant', 'router_type': 'exclusive'}} # router creation should succeed returned_router = p.create_router(context.get_admin_context(), router) # router status should be 'error' self.assertEqual(constants.ERROR, returned_router['status']) # check the same after get_router new_router = p.get_router(context.get_admin_context(), returned_router['id']) self.assertEqual(constants.ERROR, new_router['status']) def test_create_router_with_bad_az_hint(self): p = directory.get_plugin() router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 'fake_tenant', 'router_type': 'exclusive', 'availability_zone_hints': ['bad_hint']}} self.assertRaises(n_exc.NeutronException, p.create_router, context.get_admin_context(), router) def test_create_router_with_az_hint(self): az_name = 'az7' set_az_in_config(az_name) p = directory.get_plugin() p._availability_zones_data = nsx_az.NsxVAvailabilityZones() p._get_edge_id_by_rtr_id = p.real_get_edge router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 'fake_tenant', 'router_type': 'exclusive', 'availability_zone_hints': [az_name]}} # router creation should succeed returned_router = p.create_router(context.get_admin_context(), router) self.assertEqual([az_name], 
returned_router['availability_zone_hints']) self.assertEqual([az_name], returned_router['availability_zones']) def test_create_router_with_default_az(self): az_name = 'az7' set_az_in_config(az_name) cfg.CONF.set_override('default_availability_zones', [az_name]) p = directory.get_plugin() p._availability_zones_data = nsx_az.NsxVAvailabilityZones() p._get_edge_id_by_rtr_id = p.real_get_edge router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 'fake_tenant', 'router_type': 'exclusive'}} # router creation should succeed returned_router = p.create_router(context.get_admin_context(), router) self.assertEqual([], returned_router['availability_zone_hints']) self.assertEqual([az_name], returned_router['availability_zones']) def test_floatingip_update_to_same_port_id_twice(self): self.skipTest('Plugin changes floating port status') def test_router_add_interface_ipv6_subnet(self): self.skipTest('Not supported') def test_router_add_gateway_multiple_subnets_ipv6(self): self.skipTest('not supported') def test_update_subnet_gateway_for_external_net(self): plugin = directory.get_plugin() router_obj = ex_router_driver.RouterExclusiveDriver(plugin) with mock.patch.object(plugin, '_find_router_driver', return_value=router_obj): with mock.patch.object(router_obj, '_update_nexthop') as update_nexthop: super(TestExclusiveRouterTestCase, self).test_update_subnet_gateway_for_external_net() self.assertTrue(update_nexthop.called) def _test_create_subnetpool(self, prefixes, expected=None, admin=False, **kwargs): keys = kwargs.copy() keys.setdefault('tenant_id', self._tenant_id) with self.subnetpool(prefixes, admin, **keys) as subnetpool: self._validate_resource(subnetpool, keys, 'subnetpool') if expected: self._compare_resource(subnetpool, expected, 'subnetpool') return subnetpool def test_router_no_snat_with_different_address_scope(self): """Test that if the router has no snat, you cannot add an interface from a different address scope 
than the gateway. """ # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self.network() as ext_net: self._set_net_external(ext_net['network']['id']) as_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/24') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp1', min_prefixlen='24', address_scope_id=as_id) subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': ext_net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'enable_dhcp': False, 'tenant_id': ext_net['network']['tenant_id']}} req = self.new_create_request('subnets', data) ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) # create a regular network on another address scope with self.address_scope(name='as2') as addr_scope2, \ self.network() as net: as_id2 = addr_scope2['address_scope']['id'] subnet2 = netaddr.IPNetwork('20.10.10.0/24') subnetpool2 = self._test_create_subnetpool( [subnet2.cidr], name='sp2', min_prefixlen='24', address_scope_id=as_id2) subnetpool_id2 = subnetpool2['subnetpool']['id'] data = {'subnet': { 'network_id': net['network']['id'], 'subnetpool_id': subnetpool_id2, 'ip_version': 4, 'tenant_id': net['network']['tenant_id']}} req = self.new_create_request('subnets', data) int_subnet = self.deserialize( self.fmt, req.get_response(self.api)) # create a no snat router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['subnet']['network_id']) self._update_router_enable_snat( r['router']['id'], ext_subnet['subnet']['network_id'], False) # should fail adding the interface to the router err_code = webob.exc.HTTPBadRequest.code self._router_interface_action('add', r['router']['id'], int_subnet['subnet']['id'], None, err_code) def _create_subnet_and_add_to_router(self, subnetpool_id, router_id): # create a regular network on the given subnet pool with self.network() as net: data = 
{'subnet': { 'network_id': net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'tenant_id': net['network']['tenant_id']}} req = self.new_create_request('subnets', data) int_subnet = self.deserialize( self.fmt, req.get_response(self.api)) # Add the interface to the router self._router_interface_action( 'add', router_id, int_subnet['subnet']['id'], None) return int_subnet def test_router_no_snat_with_same_address_scope(self): """Test that if the router has no snat, you can add an interface from the same address scope as the gateway. """ # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self.network() as ext_net: self._set_net_external(ext_net['network']['id']) as_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/21') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp1', min_prefixlen='24', address_scope_id=as_id) subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': ext_net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'enable_dhcp': False, 'tenant_id': ext_net['network']['tenant_id']}} req = self.new_create_request('subnets', data) ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) # create a regular network on the same address scope # and create a no snat router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['subnet']['network_id']) self._update_router_enable_snat( r['router']['id'], ext_subnet['subnet']['network_id'], False) # should succeed adding the interface to the router self._create_subnet_and_add_to_router( subnetpool_id, r['router']['id']) def test_router_address_scope_snat_rules(self): """Test that if the router interface had the same address scope as the gateway - snat rule is not added, but firewall rule is. 
""" # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self.network() as ext_net: self._set_net_external(ext_net['network']['id']) as_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/21') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp1', min_prefixlen='24', address_scope_id=as_id) subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': ext_net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'enable_dhcp': False, 'tenant_id': ext_net['network']['tenant_id']}} req = self.new_create_request('subnets', data) ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) # create a regular network on the same address scope # and create a router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['subnet']['network_id']) # Add the interface to the router with mock.patch.object( edge_utils, 'update_nat_rules') as update_nat,\ mock.patch.object( edge_utils, 'update_firewall') as update_fw: int_subnet = self._create_subnet_and_add_to_router( subnetpool_id, r['router']['id']) # make sure snat rules are not added update_nat.assert_called_once_with( mock.ANY, mock.ANY, r['router']['id'], [], []) # check fw rules fw_rules = update_fw.call_args[0][3][ 'firewall_rule_list'] self.assertEqual(2, len(fw_rules)) self.assertEqual('Allocation Pool Rule', fw_rules[1]['name']) self.assertEqual('allow', fw_rules[1]['action']) self.assertEqual( int_subnet['subnet']['cidr'], fw_rules[1]['destination_ip_address'][0]) self.assertEqual('external', fw_rules[1]['source_vnic_groups'][0]) def test_router_address_scope_fw_rules(self): """Test that if the router interfaces has different address scope there are separate fw rules """ # create a router, networks, and address scopes with self.address_scope(name='as1') as addr_scope1, \ self.address_scope(name='as2') as addr_scope2, \ self.router() 
as r: as1_id = addr_scope1['address_scope']['id'] as2_id = addr_scope2['address_scope']['id'] pool1 = netaddr.IPNetwork('10.10.10.0/21') subnetpool1 = self._test_create_subnetpool( [pool1.cidr], name='sp1', min_prefixlen='24', address_scope_id=as1_id) pool2 = netaddr.IPNetwork('20.20.20.0/21') subnetpool2 = self._test_create_subnetpool( [pool2.cidr], name='sp2', min_prefixlen='24', address_scope_id=as2_id) subnetpool_id1 = subnetpool1['subnetpool']['id'] subnetpool_id2 = subnetpool2['subnetpool']['id'] # Add the interfaces to the router with mock.patch.object( edge_utils, 'update_nat_rules'),\ mock.patch.object(edge_utils, 'update_firewall') as update_fw: # create subnets on the 2 subnet pools, and attach to router subnet1 = self._create_subnet_and_add_to_router( subnetpool_id1, r['router']['id']) subnet2 = self._create_subnet_and_add_to_router( subnetpool_id2, r['router']['id']) subnet3 = self._create_subnet_and_add_to_router( subnetpool_id2, r['router']['id']) expected_rules = [ {'enabled': True, 'destination_ip_address': [subnet1['subnet']['cidr']], 'action': 'allow', 'name': 'Subnet Rule', 'source_ip_address': [subnet1['subnet']['cidr']]}, {'enabled': True, 'destination_ip_address': [subnet2['subnet']['cidr'], subnet3['subnet']['cidr']], 'action': 'allow', 'name': 'Subnet Rule', 'source_ip_address': [subnet2['subnet']['cidr'], subnet3['subnet']['cidr']]}] # check the final fw rules fw_rules = update_fw.call_args[0][3][ 'firewall_rule_list'] self.assertEqual(2, len(fw_rules)) self.assertEqual(self._recursive_sort_list(expected_rules), self._recursive_sort_list(fw_rules)) def _prepare_external_subnet_on_address_scope(self, ext_net, address_scope): self._set_net_external(ext_net['network']['id']) as_id = address_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/21') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp1', min_prefixlen='24', address_scope_id=as_id) subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 
'network_id': ext_net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'enable_dhcp': False, 'tenant_id': ext_net['network']['tenant_id']}} req = self.new_create_request('subnets', data) ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) return ext_subnet['subnet'] def _test_router_address_scope_change(self, change_gw=False): """When subnetpool address scope changes, and router that was originally under same address scope, results having different address scopes, relevant snat rules are added. """ # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self.network() as ext_net: ext_subnet = self._prepare_external_subnet_on_address_scope( ext_net, addr_scope) # create a router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['network_id']) # create a regular network on same address scope # and verify no snat change as_id = addr_scope['address_scope']['id'] subnet2 = netaddr.IPNetwork('40.10.10.0/24') subnetpool2 = self._test_create_subnetpool( [subnet2.cidr], name='sp2', min_prefixlen='24', address_scope_id=as_id) subnetpool2_id = subnetpool2['subnetpool']['id'] self._create_subnet_and_add_to_router( subnetpool2_id, r['router']['id']) # change address scope of the first subnetpool with self.address_scope(name='as2') as addr_scope2,\ mock.patch.object(edge_utils, 'update_nat_rules') as update_nat,\ mock.patch.object(edge_utils, 'update_firewall') as update_fw: as2_id = addr_scope2['address_scope']['id'] data = {'subnetpool': { 'address_scope_id': as2_id}} if change_gw: subnetpool_to_update = ext_subnet['subnetpool_id'] else: subnetpool_to_update = subnetpool2_id req = self.new_update_request('subnetpools', data, subnetpool_to_update) req.get_response(self.api) # Verify that the snat & fw rule are being updated update_nat.assert_called_once() update_fw.assert_called_once() def test_router_address_scope_change(self): 
self._test_router_address_scope_change() def test_router_address_scope_gw_change(self): self._test_router_address_scope_change(change_gw=True) class ExtGwModeTestCase(NsxVPluginV2TestCase, test_ext_gw_mode.ExtGwModeIntTestCase): def test_router_gateway_set_fail_after_port_create(self): self.skipTest("TBD") class NsxVSecurityGroupsTestCase(ext_sg.SecurityGroupDBTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): test_utils.override_nsx_ini_test() mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) mock_vcns_instance = mock_vcns.start() self.fc2 = fake_vcns.FakeVcns() mock_vcns_instance.return_value = self.fc2 edge_utils.query_dhcp_service_config = mock.Mock(return_value=[]) mock_create_dhcp_service = mock.patch("%s.%s" % ( vmware.EDGE_MANAGE_NAME, 'create_dhcp_edge_service')) mock_create_dhcp_service.start() mock_update_dhcp_service = mock.patch("%s.%s" % ( vmware.EDGE_MANAGE_NAME, 'update_dhcp_edge_service')) mock_update_dhcp_service.start() mock_delete_dhcp_service = mock.patch("%s.%s" % ( vmware.EDGE_MANAGE_NAME, 'delete_dhcp_edge_service')) mock_delete_dhcp_service.start() mock_check_backup_edge_pools = mock.patch("%s.%s" % ( vmware.EDGE_MANAGE_NAME, '_check_backup_edge_pools')) mock_check_backup_edge_pools.start() mock_process_security_group_logging = mock.patch( 'vmware_nsx.plugin.NsxVPlugin.' 
'_process_security_groups_rules_logging') mock_process_security_group_logging.start() c_utils.spawn_n = mock.Mock(side_effect=lambda f: f()) super(NsxVSecurityGroupsTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) self.plugin = directory.get_plugin() self.addCleanup(self.fc2.reset_all) class NsxVTestSecurityGroup(ext_sg.TestSecurityGroups, NsxVSecurityGroupsTestCase): @mock.patch.object(edge_utils.EdgeManager, '_deploy_edge') def setUp(self, mock_deploy, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): super(NsxVTestSecurityGroup, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) plugin_instance = directory.get_plugin() plugin_instance._get_edge_id_by_rtr_id = mock.Mock() plugin_instance._get_edge_id_by_rtr_id.return_value = False plugin_instance._get_edge_id_and_az_by_rtr_id = mock.Mock() plugin_instance._get_edge_id_and_az_by_rtr_id.return_value = ( False, False) def test_list_ports_security_group(self): with self.network() as n: with self.subnet(n, enable_dhcp=False): self._create_port(self.fmt, n['network']['id']) req = self.new_list_request('ports') res = req.get_response(self.api) ports = self.deserialize(self.fmt, res) port = ports['ports'][0] self.assertEqual(len(port[secgrp.SECURITYGROUPS]), 1) self._delete('ports', port['id']) def test_vnic_security_group_membership(self): p = directory.get_plugin() self.fc2.add_member_to_security_group = ( mock.Mock().add_member_to_security_group) self.fc2.remove_member_from_security_group = ( mock.Mock().remove_member_from_security_group) nsx_sg_id = str(self.fc2._securitygroups['ids']) device_id = _uuid() port_index = 0 # The expected vnic-id format by NsxV vnic_id = '%s.%03d' % (device_id, port_index) with self.port(device_id=device_id, device_owner='compute:None') as port: (self.fc2.add_member_to_security_group .assert_called_once_with(p.sg_container_id, nsx_sg_id)) self.fc2.add_member_to_security_group.reset_mock() data = {'port': {'vnic_index': port_index}} 
self.new_update_request('ports', data, port['port']['id']).get_response(self.api) # The vnic should be added as a member to the nsx-security-groups # which match the port security-groups (self.fc2.add_member_to_security_group .assert_called_once_with(nsx_sg_id, vnic_id)) # The vnic should be removed from the nsx-security-groups which match # the deleted port security-groups #TODO(kobis): Port is not removed automatically # (self.fc2.remove_member_from_security_group # .assert_called_once_with(nsx_sg_id, vnic_id)) def test_create_secgroup_deleted_upon_fw_section_create_fail(self): _context = context.Context('', 'tenant_id') sg = {'security_group': {'name': 'default', 'tenant_id': 'tenant_id', 'description': ''}} expected_id = str(self.fc2._securitygroups['ids']) with mock.patch.object(self.fc2, 'create_section') as create_section: with mock.patch.object(self.fc2, 'delete_security_group') as delete_sg: create_section.side_effect = webob.exc.HTTPInternalServerError self.assertRaises(webob.exc.HTTPInternalServerError, self.plugin.create_security_group, _context.elevated(), sg, default_sg=True) delete_sg.assert_called_once_with(expected_id) def test_create_security_group_rule_duplicate_rules(self): name = 'webservers' description = 'my webservers' with mock.patch.object(self.plugin.nsx_v.vcns, 'remove_rule_from_section') as rm_rule_mock: with self.security_group(name, description) as sg: rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', constants.PROTO_NAME_TCP, '22', '22') self._create_security_group_rule(self.fmt, rule) res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) rm_rule_mock.assert_called_once_with(mock.ANY, mock.ANY) def test_create_security_group_rule_with_specific_id(self): # This test is aimed to test the security-group db mixin pass def _plugin_update_security_group(self, context, id, logging): data = {'security_group': 
{'logging': logging}} security_group = ( self.plugin.update_security_group(context, id, data)) return security_group def _plugin_create_security_group(self, context, logging=False): data = {'security_group': {'name': 'SG', 'tenant_id': 'tenant_id', 'description': ''}} if logging: data['security_group']['logging'] = True security_group = ( self.plugin.create_security_group(context, data, False)) return security_group def test_create_security_group_default_logging(self): _context = context.get_admin_context() sg = self._plugin_create_security_group(_context) self.assertFalse(sg['logging']) def test_create_security_group_with_logging(self): _context = context.get_admin_context() sg = self._plugin_create_security_group(_context, logging=True) self.assertTrue(sg['logging']) def test_update_security_group_with_logging(self): _context = context.get_admin_context() sg = self._plugin_create_security_group(_context) sg = self._plugin_update_security_group(_context, sg['id'], True) self.assertTrue(sg['logging']) def test_create_security_group_rule_bulk(self): """Verify that bulk rule create updates the backend section once""" fake_update_sect = self.fc2.update_section def mock_update_section(section_uri, request, h): return fake_update_sect(section_uri, request, h) plugin = directory.get_plugin() with self.security_group() as sg,\ mock.patch.object(plugin.nsx_v.vcns, 'update_section', side_effect=mock_update_section) as update_sect: rule1 = self._build_security_group_rule(sg['security_group']['id'], 'ingress', 'tcp', '22', '22', '10.0.0.1/24') rule2 = self._build_security_group_rule(sg['security_group']['id'], 'ingress', 'tcp', '23', '23', '10.0.0.1/24') rules = {'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) ret = self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) self.assertEqual(2, len(ret['security_group_rules'])) 
update_sect.assert_called_once() class TestVdrTestCase(L3NatTest, L3NatTestCaseBase, test_l3_plugin.L3NatDBIntTestCase, IPv6ExpectedFailuresTestMixin, NsxVPluginV2TestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): # init the availability zones in the configuration of the plugin self.az_name = 'az7' set_az_in_config(self.az_name) super(TestVdrTestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.plugin_instance.nsx_v.is_subnet_in_use = mock.Mock() self.plugin_instance.nsx_v.is_subnet_in_use.return_value = False self._default_tenant_id = self._tenant_id self._router_tenant_id = 'test-router-tenant' @mock.patch.object(edge_utils.EdgeManager, 'update_interface_addr') def test_router_update_gateway_with_different_external_subnet(self, mock): # This test calls the backend, so we need a mock for the edge_utils super( TestVdrTestCase, self).test_router_update_gateway_with_different_external_subnet() def test_floatingip_multi_external_one_internal(self): self.skipTest('skipped') def test_router_add_gateway_multiple_subnets_ipv6(self): self.skipTest('not supported') def test_router_add_interface_ipv6_subnet(self): self.skipTest('Not supported') def test_router_add_interface_dup_subnet2_returns_400(self): self.skipTest('skipped') def test_floatingip_same_external_and_internal(self): self.skipTest('skipped') def test_create_router_fail_at_the_backend(self): p = directory.get_plugin() edge_manager = p.edge_manager with mock.patch.object(edge_manager, 'create_lrouter', side_effect=[n_exc.NeutronException]): router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 'fake_tenant', 'distributed': True}} self.assertRaises(n_exc.NeutronException, p.create_router, context.get_admin_context(), router) self._test_list_resources('router', ()) def test_update_port_device_id_to_different_tenants_router(self): self.skipTest('TBD') def 
test_router_add_and_remove_gateway_tenant_ctx(self): self.skipTest('TBD') def _create_router(self, fmt, tenant_id, name=None, admin_state_up=None, set_context=False, arg_list=None, **kwargs): tenant_id = tenant_id or _uuid() data = {'router': {'tenant_id': tenant_id}} if name: data['router']['name'] = name if admin_state_up: data['router']['admin_state_up'] = admin_state_up for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): # Arg must be present and not empty if arg in kwargs and kwargs[arg]: data['router'][arg] = kwargs[arg] if 'distributed' in kwargs: data['router']['distributed'] = kwargs['distributed'] else: data['router']['distributed'] = True if ('availability_zone_hints' in kwargs and kwargs['availability_zone_hints'] is not None): data['router']['availability_zone_hints'] = kwargs[ 'availability_zone_hints'] if kwargs.get('router_type'): data['router']['router_type'] = kwargs.get('router_type') router_req = self.new_create_request('routers', data, fmt) if set_context and tenant_id: # create a specific auth context for this request router_req.environ['neutron.context'] = context.Context( '', tenant_id) return router_req.get_response(self.ext_api) def _test_router_plr_binding(self, expected_size='compact', availability_zone=None): """Test PLR router bindings Create a distributed router with an external network and check that the router was created as it should from the binding entry """ # create a distributed router tenant_id = _uuid() router_ctx = context.Context('', tenant_id) az_hints = [availability_zone] if availability_zone else None res = self._create_router(self.fmt, tenant_id, distributed=True, availability_zone_hints=az_hints) r = self.deserialize(self.fmt, res) self.assertIn('router', r) with self._create_l3_ext_network() as net: with self.subnet(network=net, enable_dhcp=False) as s2: # Plug network with external mapping self._set_net_external(s2['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], 
s2['subnet']['network_id'], neutron_context=router_ctx) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s2['subnet']['network_id']) # make sure the plr router was created, with the expected data plr_id = self.plugin_instance.edge_manager.get_plr_by_tlr_id( router_ctx, r['router']['id']) binding = nsxv_db.get_nsxv_router_binding( router_ctx.session, plr_id) self.assertEqual(expected_size, binding['appliance_size']) self.assertEqual('ACTIVE', binding['status']) self.assertIsNotNone(binding['edge_id']) self.assertEqual('service', binding['edge_type']) self.assertTrue(binding['router_id'].startswith('plr')) if availability_zone: self.assertEqual( availability_zone, binding['availability_zone']) else: self.assertEqual('default', binding['availability_zone']) # Cleanup self._remove_external_gateway_from_router( r['router']['id'], s2['subnet']['network_id']) def test_router_plr_binding_default_size(self): self._test_router_plr_binding() def test_router_plr_binding_configured_size(self): cfg.CONF.set_override('exclusive_router_appliance_size', 'large', group="nsxv") self._test_router_plr_binding(expected_size='large') def test_router_plr_binding_default_az(self): self._test_router_plr_binding(availability_zone='default') def test_router_plr_binding_with_az(self): self._test_router_plr_binding(availability_zone=self.az_name) def test_router_binding_with_az(self): """Check distributed router creation with an availability zone """ # create a distributed router tenant_id = _uuid() router_ctx = context.Context('', tenant_id) res = self._create_router(self.fmt, tenant_id, distributed=True, availability_zone_hints=[self.az_name]) r = self.deserialize(self.fmt, res) self.assertIn('router', r) # check that we have an edge for this router, with the correct # availability zone binding = nsxv_db.get_nsxv_router_binding( router_ctx.session, r['router']['id']) self.assertEqual('compact', 
binding['appliance_size']) self.assertEqual('ACTIVE', binding['status']) self.assertIsNotNone(binding['edge_id']) self.assertEqual('vdr', binding['edge_type']) self.assertEqual(binding['router_id'], r['router']['id']) self.assertEqual(self.az_name, binding['availability_zone']) def _test_router_create_with_distributed(self, dist_input, dist_expected, return_code=201, **kwargs): data = {'tenant_id': 'whatever'} data['name'] = 'router1' data['distributed'] = dist_input for k, v in six.iteritems(kwargs): data[k] = v router_req = self.new_create_request( 'routers', {'router': data}, self.fmt) res = router_req.get_response(self.ext_api) self.assertEqual(return_code, res.status_int) if res.status_int == 201: router = self.deserialize(self.fmt, res) self.assertIn('distributed', router['router']) if dist_input: self.assertNotIn('router_type', router['router']) self.assertEqual(dist_expected, router['router']['distributed']) def test_create_router_fails_with_router_type(self): self._test_router_create_with_distributed(True, True, return_code=400, router_type="shared") def test_router_create_distributed(self): self._test_router_create_with_distributed(True, True) def test_router_create_not_distributed(self): self._test_router_create_with_distributed(False, False) def test_router_create_distributed_unspecified(self): self._test_router_create_with_distributed(None, False) def _test_create_router_with_az_hint(self, with_hint): # init the availability zones in the plugin az_name = 'az7' set_az_in_config(az_name) p = directory.get_plugin() p._availability_zones_data = nsx_az.NsxVAvailabilityZones() # create a router with/without hints router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 'FAKE_TENANT', 'distributed': True}} if with_hint: router['router']['availability_zone_hints'] = [az_name] returned_router = p.create_router(context.get_admin_context(), router) # availability zones is still empty because the router is not 
attached if with_hint: self.assertEqual([az_name], returned_router['availability_zone_hints']) else: self.assertEqual([], returned_router['availability_zone_hints']) edge_id = edge_utils.get_router_edge_id( context.get_admin_context(), returned_router['id']) res_az = nsxv_db.get_edge_availability_zone( context.get_admin_context().session, edge_id) expected_az = az_name if with_hint else 'default' self.assertEqual(expected_az, res_az) def test_create_router_with_az_hint(self): self._test_create_router_with_az_hint(True) def test_create_router_without_az_hint(self): self._test_create_router_with_az_hint(False) def test_floatingip_with_assoc_fails(self): self._test_floatingip_with_assoc_fails( self._plugin_name + '._check_and_get_fip_assoc') def test_floatingip_update(self): super(TestVdrTestCase, self).test_floatingip_update( constants.FLOATINGIP_STATUS_DOWN) def test_floatingip_with_invalid_create_port(self): self._test_floatingip_with_invalid_create_port(self._plugin_name) def test_router_add_gateway_invalid_network_returns_404(self): with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], uuidutils.generate_uuid(), expected_code=webob.exc.HTTPNotFound.code) def test_router_add_interfaces_with_multiple_subnets_on_same_network(self): with self.router() as r,\ self.network() as n,\ self.subnet(network=n) as s1,\ self.subnet(network=n, cidr='11.0.0.0/24') as s2: self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) err_code = webob.exc.HTTPBadRequest.code self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None, err_code) self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None) def test_router_add_interface_with_external_net_fail(self): with self.router() as r,\ self.network() as n,\ self.subnet(network=n) as s: # Set the network as an external net net_id = n['network']['id'] self._set_net_external(net_id) err_code = webob.exc.HTTPBadRequest.code 
self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None, err_code) def test_different_type_routers_add_interfaces_on_same_network_pass(self): with self.router() as dist, \ self.router(distributed=False, router_type='shared') as shared, \ self.router(distributed=False, router_type='exclusive') as excl: with self.network() as n: with self.subnet(network=n) as s1, \ self.subnet(network=n, cidr='11.0.0.0/24') as s2, \ self.subnet(network=n, cidr='12.0.0.0/24') as s3: self._router_interface_action('add', shared['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', excl['router']['id'], s2['subnet']['id'], None) self._router_interface_action('add', dist['router']['id'], s3['subnet']['id'], None) self._router_interface_action('remove', dist['router']['id'], s3['subnet']['id'], None) self._router_interface_action('remove', excl['router']['id'], s2['subnet']['id'], None) self._router_interface_action('remove', shared['router']['id'], s1['subnet']['id'], None) def test_router_update_type_fails(self): """Check distributed router cannot change it's type """ # create a distributed router tenant_id = _uuid() res = self._create_router(self.fmt, tenant_id, distributed=True) r = self.deserialize(self.fmt, res) router_id = r['router']['id'] # make sure changing the type fails self._update('routers', router_id, {'router': {'router_type': 'shared'}}, expected_code=400) self._update('routers', router_id, {'router': {'router_type': 'exclusive'}}, expected_code=400) self._update('routers', router_id, {'router': {'distributed': False}}, expected_code=400) # make sure keeping the type is ok self._update('routers', router_id, {'router': {'distributed': True}}, expected_code=200) def test_router_add_interface_multiple_ipv4_subnets(self): self.skipTest('TBD') def test_router_remove_ipv6_subnet_from_interface(self): self.skipTest('TBD') def test_router_add_interface_multiple_ipv6_subnets_same_net(self): self.skipTest('TBD') def 
test_router_add_interface_multiple_ipv6_subnets_different_net(self): self.skipTest('TBD') def test_create_router_gateway_fails(self): self.skipTest('not supported') def test_floatingip_update_to_same_port_id_twice(self): self.skipTest('Plugin changes floating port status') def test_update_subnet_gateway_for_external_net(self): plugin = directory.get_plugin() router_obj = dist_router_driver.RouterDistributedDriver(plugin) with mock.patch.object(plugin, '_find_router_driver', return_value=router_obj): with mock.patch.object(router_obj, '_update_nexthop') as update_nexthop: super(TestVdrTestCase, self).test_update_subnet_gateway_for_external_net() self.assertTrue(update_nexthop.called) def test_router_add_interface_ipv6_port_existing_network_returns_400(self): """Ensure unique IPv6 router ports per network id. Adding a router port containing one or more IPv6 subnets with the same network id as an existing router port should fail. This is so there is no ambiguity regarding on which port to add an IPv6 subnet when executing router-interface-add with a subnet and no port. """ with self.network() as n, self.router() as r: with self.subnet(network=n, cidr='fd00::/64', ip_version=6, enable_dhcp=False) as s1, ( self.subnet(network=n, cidr='fd01::/64', ip_version=6, enable_dhcp=False)) as s2: with self.port(subnet=s1) as p: exp_code = webob.exc.HTTPBadRequest.code self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None, expected_code=exp_code) self._router_interface_action('add', r['router']['id'], None, p['port']['id'], expected_code=exp_code) class TestNSXvAllowedAddressPairs(NsxVPluginV2TestCase, test_addr_pair.TestAllowedAddressPairs): def setUp(self, plugin=PLUGIN_NAME): super(TestNSXvAllowedAddressPairs, self).setUp(plugin=plugin) # NOTE: the tests below are skipped due to the fact that they update the # mac address. The NSX|V does not support address pairs when a MAC address # is configured. 
def test_create_port_allowed_address_pairs(self): pass def test_update_add_address_pairs(self): pass def test_equal_to_max_allowed_address_pair(self): pass def test_update_port_security_off_address_pairs(self): pass def test_create_port_security_true_allowed_address_pairs(self): pass def test_create_port_security_false_allowed_address_pairs(self): pass def _test_create_port_remove_allowed_address_pairs(self, update_value): pass def test_create_overlap_with_fixed_ip(self): pass def test_create_port_with_cidr_address_pair(self): with self.network() as net: address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '192.168.1.0/24'}] self._create_port(self.fmt, net['network']['id'], expected_res_status=webob.exc.HTTPBadRequest.code, arg_list=(addrp_apidef.ADDRESS_PAIRS,), allowed_address_pairs=address_pairs) class TestNSXPortSecurity(test_psec.TestPortSecurity, NsxVPluginV2TestCase): def setUp(self, plugin=PLUGIN_NAME): super(TestNSXPortSecurity, self).setUp(plugin=plugin) def test_create_port_fails_with_secgroup_and_port_security_false(self): # Security Groups can be used even when port-security is disabled pass def test_update_port_security_off_with_security_group(self): # Security Groups can be used even when port-security is disabled pass def test_create_port_with_security_group_and_net_sec_false(self): pass def _create_compute_port(self, network_name, device_id, port_security): # create a network without port security res = self._create_network('json', network_name, True) net = self.deserialize('json', res) # create a compute port with this network and a device res = self._create_port('json', net['network']['id'], arg_list=('port_security_enabled', 'device_id', 'device_owner',), port_security_enabled=port_security, device_id=device_id, device_owner='compute:None') return self.deserialize('json', res) def _add_vnic_to_port(self, port_id, add_exclude, vnic_index): """Add vnic to a port and check if the device was added to the exclude list """ plugin = 
self._get_core_plugin_with_dvs() vm_moref = 'dummy_moref' with mock.patch.object(plugin._vcm, 'get_vm_moref', return_value=vm_moref): with mock.patch.object( plugin.nsx_v.vcns, 'add_vm_to_exclude_list') as exclude_list_add: data = {'port': {'vnic_index': vnic_index}} self.new_update_request( 'ports', data, port_id).get_response(self.api) if add_exclude: # make sure the vm was added to the exclude list exclude_list_add.assert_called_once_with(vm_moref) else: self.assertFalse(exclude_list_add.called) def _del_vnic_from_port(self, port_id, del_exclude): """Delete the vnic & device id from the port and check if the device was removed from the exclude list """ plugin = self._get_core_plugin_with_dvs() vm_moref = 'dummy_moref' with mock.patch.object(plugin._vcm, 'get_vm_moref', return_value=vm_moref): with mock.patch.object( plugin.nsx_v.vcns, 'delete_vm_from_exclude_list') as exclude_list_del: data = {'port': {'vnic_index': None, 'device_id': ''}} self.new_update_request( 'ports', data, port_id).get_response(self.api) if del_exclude: # make sure the vm was added to the exclude list exclude_list_del.assert_called_once_with(vm_moref) else: self.assertFalse(exclude_list_del.called) def _del_port_with_vnic(self, port_id, del_exclude): """Delete port with vnic, and check if the device was removed from the exclude list """ plugin = self._get_core_plugin_with_dvs() vm_moref = 'dummy_moref' with mock.patch.object(plugin._vcm, 'get_vm_moref', return_value=vm_moref): with mock.patch.object( plugin.nsx_v.vcns, 'delete_vm_from_exclude_list') as exclude_list_del: self.new_delete_request( 'ports', port_id).get_response(self.api) if del_exclude: # make sure the vm was added to the exclude list exclude_list_del.assert_called_once_with(vm_moref) else: self.assertFalse(exclude_list_del.called) def test_update_port_no_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port = self._create_compute_port('net1', device_id, False) # add vnic to the port 
self._add_vnic_to_port(port['port']['id'], True, 3) # delete vnic from the port self._del_vnic_from_port(port['port']['id'], True) def test_update_multiple_port_no_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port1 = self._create_compute_port('net1', device_id, False) # add vnic to the port self._add_vnic_to_port(port1['port']['id'], True, 3) # create another compute port without port security on the same device port2 = self._create_compute_port('net2', device_id, False) # add vnic to the port (no need to add to exclude list again) self._add_vnic_to_port(port2['port']['id'], False, 4) # delete vnics from the port self._del_vnic_from_port(port1['port']['id'], False) self._del_vnic_from_port(port2['port']['id'], True) def test_update_mixed_port_no_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port1 = self._create_compute_port('net1', device_id, True) # add vnic to the port self._add_vnic_to_port(port1['port']['id'], False, 3) irrelevant_device_id = _uuid() # create a compute port without port security for a different device port2 = self._create_compute_port('net1', irrelevant_device_id, True) # add vnic to the port self._add_vnic_to_port(port2['port']['id'], False, 3) # create another compute port without port security on the same device port3 = self._create_compute_port('net2', device_id, False) # add vnic to the port (no need to add to exclude list again) self._add_vnic_to_port(port3['port']['id'], True, 4) # delete vnics from the port self._del_vnic_from_port(port1['port']['id'], False) self._del_vnic_from_port(port3['port']['id'], True) self._del_vnic_from_port(port2['port']['id'], False) def test_delete_port_no_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port = self._create_compute_port('net1', device_id, False) # add vnic to the port self._add_vnic_to_port(port['port']['id'], True, 3) # delete port with the vnic 
self._del_port_with_vnic(port['port']['id'], True) def test_delete_multiple_port_no_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port1 = self._create_compute_port('net1', device_id, False) # add vnic to the port self._add_vnic_to_port(port1['port']['id'], True, 3) # create another compute port without port security on the same device port2 = self._create_compute_port('net2', device_id, False) # add vnic to the port (no need to add to exclude list again) self._add_vnic_to_port(port2['port']['id'], False, 4) # delete ports with the vnics self._del_port_with_vnic(port2['port']['id'], False) self._del_port_with_vnic(port1['port']['id'], True) def test_detach_port_no_sec(self): device_id = _uuid() # create a compute port without port security port = self._create_compute_port('net1', device_id, False) # add vnic to the port self._add_vnic_to_port(port['port']['id'], True, 3) # detach the port with mock.patch.object( self.fc2, 'inactivate_vnic_assigned_addresses') as mock_inactivte: self._del_vnic_from_port(port['port']['id'], True) # inactivate spoofguard should not be called self.assertFalse(mock_inactivte.called) def test_detach_port_with_sec(self): device_id = _uuid() # create a compute port without port security port = self._create_compute_port('net1', device_id, True) # add vnic to the port self._add_vnic_to_port(port['port']['id'], False, 3) # detach the port with mock.patch.object( self.fc2, 'inactivate_vnic_assigned_addresses') as mock_inactivte: self._del_vnic_from_port(port['port']['id'], False) # inactivate spoofguard should be called self.assertTrue(mock_inactivte.called) def _toggle_port_security(self, port_id, enable_port_security, update_exclude): """Enable/disable port security on a port, and verify that the exclude list was updated as expected """ plugin = self._get_core_plugin_with_dvs() vm_moref = 'dummy_moref' data = {'port': {'port_security_enabled': enable_port_security}} with 
mock.patch.object(plugin._vcm, 'get_vm_moref', return_value=vm_moref): if enable_port_security: with mock.patch.object( plugin.nsx_v.vcns, 'delete_vm_from_exclude_list') as exclude_list_del: self.new_update_request( 'ports', data, port_id).get_response(self.api) if update_exclude: # make sure the vm was added to the exclude list exclude_list_del.assert_called_once_with(vm_moref) else: self.assertFalse(exclude_list_del.called) else: with mock.patch.object( plugin.nsx_v.vcns, 'add_vm_to_exclude_list') as exclude_list_add: self.new_update_request( 'ports', data, port_id).get_response(self.api) if update_exclude: # make sure the vm was added to the exclude list exclude_list_add.assert_called_once_with(vm_moref) else: self.assertFalse(exclude_list_add.called) def test_update_port_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port = self._create_compute_port('net1', device_id, False) # add vnic to the port self._add_vnic_to_port(port['port']['id'], True, 3) # enable port security self._toggle_port_security(port['port']['id'], True, True) # disable port security self._toggle_port_security(port['port']['id'], False, True) # delete vnic from the port self._del_vnic_from_port(port['port']['id'], True) def test_update_multiple_port_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port1 = self._create_compute_port('net1', device_id, False) # add vnic to the port self._add_vnic_to_port(port1['port']['id'], True, 3) # create another compute port without port security port2 = self._create_compute_port('net2', device_id, False) # add vnic to the port self._add_vnic_to_port(port2['port']['id'], False, 4) # enable port security on both ports self._toggle_port_security(port1['port']['id'], True, False) self._toggle_port_security(port2['port']['id'], True, True) # disable port security on both ports self._toggle_port_security(port1['port']['id'], False, True) 
self._toggle_port_security(port2['port']['id'], False, False) def test_service_insertion(self): # init the plugin mocks p = directory.get_plugin() self.fc2.add_member_to_security_group = ( mock.Mock().add_member_to_security_group) self.fc2.remove_member_from_security_group = ( mock.Mock().remove_member_from_security_group) # mock the service insertion handler p._si_handler = mock.Mock() p._si_handler.enabled = True p._si_handler.sg_id = '11' # create a compute port with port security device_id = _uuid() port = self._create_compute_port('net1', device_id, True) # add vnic to the port, and verify that the port was added to the # service insertion security group vnic_id = 3 vnic_index = '%s.%03d' % (device_id, vnic_id) self.fc2.add_member_to_security_group.reset_mock() self._add_vnic_to_port(port['port']['id'], False, vnic_id) self.fc2.add_member_to_security_group.assert_any_call( p._si_handler.sg_id, vnic_index) # disable the port security and make sure it is removed from the # security group self.fc2.remove_member_from_security_group.reset_mock() self._toggle_port_security(port['port']['id'], False, True) self.fc2.remove_member_from_security_group.assert_any_call( p._si_handler.sg_id, vnic_index) def test_service_insertion_notify(self): # create a compute ports with/without port security device_id = _uuid() # create 2 compute ports with port security port1 = self._create_compute_port('net1', device_id, True) self._add_vnic_to_port(port1['port']['id'], False, 1) port2 = self._create_compute_port('net2', device_id, True) self._add_vnic_to_port(port2['port']['id'], False, 2) # create 1 compute port without port security port3 = self._create_compute_port('net3', device_id, False) self._add_vnic_to_port(port3['port']['id'], True, 3) # init the plugin mocks p = directory.get_plugin() self.fc2.add_member_to_security_group = ( mock.Mock().add_member_to_security_group) # call the function (that should be called from the flow classifier # driver) and verify it adds all 
relevant ports to the group # Since it uses spawn_n, we should mock it. orig_spawn = c_utils.spawn_n c_utils.spawn_n = mock.Mock(side_effect=lambda f, x: f(x, None)) p.add_vms_to_service_insertion(sg_id='aaa') # back to normal c_utils.spawn_n = orig_spawn self.assertEqual(2, self.fc2.add_member_to_security_group.call_count) def test_toggle_non_compute_port_security(self): # create a network without port security res = self._create_network('json', 'net1', True) net = self.deserialize('json', res) # create a port with this network and a device res = self._create_port('json', net['network']['id'], arg_list=('port_security_enabled',), port_security_enabled=True) port = self.deserialize('json', res) port_id = port['port']['id'] # Disable port security data = {'port': {'port_security_enabled': False}} updated_port = self.deserialize( 'json', self.new_update_request('ports', data, port_id).get_response(self.api)) self.assertFalse(updated_port['port']['port_security_enabled']) shown_port = self.deserialize( 'json', self.new_show_request('ports', port_id).get_response(self.api)) self.assertFalse(shown_port['port']['port_security_enabled']) # Enable port security data = {'port': {'port_security_enabled': True}} updated_port = self.deserialize( 'json', self.new_update_request('ports', data, port_id).get_response(self.api)) self.assertTrue(updated_port['port']['port_security_enabled']) shown_port = self.deserialize( 'json', self.new_show_request('ports', port_id).get_response(self.api)) self.assertTrue(shown_port['port']['port_security_enabled']) class TestSharedRouterTestCase(L3NatTest, L3NatTestCaseBase, test_l3_plugin.L3NatTestCaseMixin, NsxVPluginV2TestCase): def _create_router(self, fmt, tenant_id, name=None, admin_state_up=None, set_context=False, arg_list=None, **kwargs): tenant_id = tenant_id or _uuid() data = {'router': {'tenant_id': tenant_id}} if name: data['router']['name'] = name if admin_state_up: data['router']['admin_state_up'] = admin_state_up for arg in 
(('admin_state_up', 'tenant_id') + (arg_list or ())): # Arg must be present and not empty if arg in kwargs and kwargs[arg]: data['router'][arg] = kwargs[arg] data['router']['router_type'] = kwargs.get('router_type', 'shared') router_req = self.new_create_request('routers', data, fmt) if set_context and tenant_id: # create a specific auth context for this request router_req.environ['neutron.context'] = context.Context( '', tenant_id) return router_req.get_response(self.ext_api) @mock.patch.object(edge_utils.EdgeManager, 'update_interface_addr') def test_router_add_interface_multiple_ipv6_subnets_same_net(self, mock): super(TestSharedRouterTestCase, self).test_router_add_interface_multiple_ipv6_subnets_same_net() def test_router_create_with_no_edge(self): name = 'router1' tenant_id = _uuid() expected_value = [('name', name), ('tenant_id', tenant_id), ('admin_state_up', True), ('status', 'ACTIVE'), ('external_gateway_info', None)] with self.router(name='router1', admin_state_up=True, tenant_id=tenant_id) as router: for k, v in expected_value: self.assertEqual(router['router'][k], v) self.assertEqual( [], self.plugin_instance.edge_manager.get_routers_on_same_edge( context.get_admin_context(), router['router']['id'])) def test_router_create_with_size_fail_at_backend(self): data = {'router': { 'tenant_id': 'whatever', 'router_type': 'shared', 'router_size': 'large'}} router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) msg = ('Bad router request: ' 'Cannot specify router-size for shared router.') self.assertEqual("BadRequest", router['NeutronError']['type']) self.assertEqual(msg, router['NeutronError']['message']) def test_router_create_with_gwinfo_with_no_edge(self): with self._create_l3_ext_network() as net: with self.subnet(network=net, enable_dhcp=False) as s: data = {'router': {'tenant_id': 'whatever'}} data['router']['name'] = 'router1' 
data['router']['external_gateway_info'] = { 'network_id': s['subnet']['network_id']} router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) self.assertEqual( s['subnet']['network_id'], (router['router']['external_gateway_info'] ['network_id'])) self.assertEqual( [], self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), router['router']['id'])) def test_router_update_with_routes_fail(self): """Shared router currently does not support static routes """ with self.router() as r: router_id = r['router']['id'] body = self._show('routers', router_id) body['router']['routes'] = [{'destination': '5.5.5.5/32', 'nexthop': '6.6.6.6'}] self._update('routers', router_id, body, expected_code=400, neutron_context=context.get_admin_context()) def test_router_update_gateway_with_no_edge(self): with self.router() as r: with self.subnet() as s1: with self._create_l3_ext_network() as net: with self.subnet(network=net, enable_dhcp=False) as s2: self._set_net_external(s1['subnet']['network_id']) try: self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s1['subnet']['network_id']) self.assertEqual( [], self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r['router']['id'])) # Plug network with external mapping self._set_net_external(s2['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s2['subnet']['network_id']) self.assertEqual( [], self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), r['router']['id'])) finally: # Cleanup self._remove_external_gateway_from_router( r['router']['id'], s2['subnet']['network_id']) def test_router_update_gateway_with_existing_floatingip_with_edge(self): with self._create_l3_ext_network() as net: with self.subnet(network=net, enable_dhcp=False) as subnet: with self.floatingip_with_assoc() as fip: self._add_external_gateway_to_router( fip['floatingip']['router_id'], subnet['subnet']['network_id'], expected_code=webob.exc.HTTPConflict.code) self.assertNotEqual( [], self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), fip['floatingip']['router_id'])) def test_router_set_gateway_with_interfaces_with_edge(self): with self.router() as r, self.subnet() as s1: self._set_net_external(s1['subnet']['network_id']) try: self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s1['subnet']['network_id']) self.assertEqual( [], self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r['router']['id'])) with self.subnet(cidr='11.0.0.0/24') as s11: with self.subnet(cidr='12.0.0.0/24') as s12: self._router_interface_action('add', r['router']['id'], s11['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s12['subnet']['id'], None) self.assertIsNotNone( self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r['router']['id'])) self._router_interface_action('remove', r['router']['id'], s11['subnet']['id'], None) self.assertIsNotNone( self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r['router']['id'])) self._router_interface_action('remove', r['router']['id'], s12['subnet']['id'], None) self.assertEqual( [], self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), r['router']['id'])) finally: # Cleanup self._remove_external_gateway_from_router( r['router']['id'], s1['subnet']['network_id']) @mock.patch.object(edge_utils, "update_firewall") def test_routers_set_gateway_with_nosnat(self, mock): expected_fw1 = [{'action': 'allow', 'enabled': True, 'name': 'Subnet Rule', 'source_ip_address': [], 'destination_ip_address': []}] expected_fw2 = [{'action': 'allow', 'enabled': True, 'name': 'Subnet Rule', 'source_ip_address': [], 'destination_ip_address': []}] nosnat_fw1 = [{'action': 'allow', 'enabled': True, 'name': 'No SNAT Rule', 'source_vnic_groups': ["external"], 'destination_ip_address': []}] nosnat_fw2 = [{'action': 'allow', 'enabled': True, 'name': 'No SNAT Rule', 'source_vnic_groups': ["external"], 'destination_ip_address': []}] with self.router() as r1, self.router() as r2,\ self.subnet() as ext_subnet,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2: self._set_net_external(ext_subnet['subnet']['network_id']) self._router_interface_action( 'add', r1['router']['id'], s1['subnet']['id'], None) expected_fw1[0]['source_ip_address'] = ['11.0.0.0/24'] expected_fw1[0]['destination_ip_address'] = ['11.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual(self._recursive_sort_list(expected_fw1), self._recursive_sort_list(fw_rules)) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) self._add_external_gateway_to_router( r1['router']['id'], ext_subnet['subnet']['network_id']) self._add_external_gateway_to_router( r2['router']['id'], ext_subnet['subnet']['network_id']) expected_fw2[0]['source_ip_address'] = ['12.0.0.0/24'] expected_fw2[0]['destination_ip_address'] = ['12.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw1 + expected_fw2), self._recursive_sort_list(fw_rules)) self._update_router_enable_snat( 
r1['router']['id'], ext_subnet['subnet']['network_id'], False) nosnat_fw1[0]['destination_ip_address'] = ['11.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw1 + expected_fw2 + nosnat_fw1), self._recursive_sort_list(fw_rules)) self._update_router_enable_snat( r2['router']['id'], ext_subnet['subnet']['network_id'], False) nosnat_fw2[0]['destination_ip_address'] = ['12.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw1 + expected_fw2 + nosnat_fw1 + nosnat_fw2), self._recursive_sort_list(fw_rules)) self._update_router_enable_snat( r2['router']['id'], ext_subnet['subnet']['network_id'], True) fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw1 + expected_fw2 + nosnat_fw1), self._recursive_sort_list(fw_rules)) self._router_interface_action('remove', r2['router']['id'], s2['subnet']['id'], None) fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw1 + nosnat_fw1), self._recursive_sort_list(fw_rules)) self._remove_external_gateway_from_router( r1['router']['id'], ext_subnet['subnet']['network_id']) fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw1), self._recursive_sort_list(fw_rules)) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) self._remove_external_gateway_from_router( r2['router']['id'], ext_subnet['subnet']['network_id']) def test_routers_with_interface_on_same_edge(self): with self.router() as r1, self.router() as r2,\ self.subnet(cidr='11.0.0.0/24') as s11,\ self.subnet(cidr='12.0.0.0/24') as s12: self._router_interface_action('add', r1['router']['id'], s11['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s12['subnet']['id'], None) routers_expected = [r1['router']['id'], 
r2['router']['id']] routers_1 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r1['router']['id'])) self.assertEqual(set(routers_expected), set(routers_1)) routers_2 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(set(routers_expected), set(routers_2)) self._router_interface_action('remove', r1['router']['id'], s11['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s12['subnet']['id'], None) def test_routers_with_overlap_interfaces(self): with self.router() as r1, self.router() as r2,\ self.subnet(cidr='11.0.0.0/24') as s11,\ self.subnet(cidr='11.0.0.0/24') as s12: self._router_interface_action('add', r1['router']['id'], s11['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s12['subnet']['id'], None) r1_expected = [r1['router']['id']] routers_1 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r1['router']['id'])) self.assertEqual(r1_expected, routers_1) r2_expected = [r2['router']['id']] routers_2 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(r2_expected, routers_2) self._router_interface_action('remove', r1['router']['id'], s11['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s12['subnet']['id'], None) def test_routers_with_overlap_interfaces_with_migration(self): with self.router() as r1, self.router() as r2,\ self.subnet(cidr='11.0.0.0/24') as s11,\ self.subnet(cidr='12.0.0.0/24') as s12,\ self.subnet(cidr='11.0.0.0/24') as s13: self._router_interface_action('add', r1['router']['id'], s11['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s12['subnet']['id'], None) r1_expected = [r1['router']['id'], r2['router']['id']] routers_1 = (self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), r1['router']['id'])) self.assertEqual(set(r1_expected), set(routers_1)) self._router_interface_action('add', r2['router']['id'], s13['subnet']['id'], None) r1_expected = [r1['router']['id']] routers_1 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r1['router']['id'])) self.assertEqual(r1_expected, routers_1) self._router_interface_action('remove', r1['router']['id'], s11['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s12['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s13['subnet']['id'], None) def test_routers_with_different_subnet_on_same_network(self): with self.router() as r1, self.router() as r2,\ self.network() as net,\ self.subnet(network=net, cidr='12.0.0.0/24') as s1,\ self.subnet(network=net, cidr='13.0.0.0/24') as s2: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) routers_2 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(1, len(routers_2)) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s2['subnet']['id'], None) def test_routers_with_different_subnet_on_same_network_migration(self): with self.router() as r1, self.router() as r2, self.network() as net,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(network=net, cidr='12.0.0.0/24') as s2,\ self.subnet(network=net, cidr='13.0.0.0/24') as s3: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) routers_2 = (self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(2, len(routers_2)) self._router_interface_action('add', r2['router']['id'], s3['subnet']['id'], None) routers_2 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(2, len(routers_2)) self._router_interface_action('remove', r2['router']['id'], s3['subnet']['id'], None) self._router_interface_action('add', r1['router']['id'], s3['subnet']['id'], None) routers_2 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(1, len(routers_2)) self._router_interface_action('remove', r1['router']['id'], s3['subnet']['id'], None) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s2['subnet']['id'], None) def test_routers_set_same_gateway_on_same_edge(self): with self.router() as r1, self.router() as r2,\ self.network() as ext_net,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2,\ self.subnet(network=ext_net, cidr='13.0.0.0/24'): self._set_net_external(ext_net['network']['id']) self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) self._add_external_gateway_to_router( r1['router']['id'], ext_net['network']['id']) self._add_external_gateway_to_router( r2['router']['id'], ext_net['network']['id']) routers_2 = (self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(2, len(routers_2)) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s2['subnet']['id'], None) self._remove_external_gateway_from_router( r1['router']['id'], ext_net['network']['id']) self._remove_external_gateway_from_router( r2['router']['id'], ext_net['network']['id']) def test_routers_set_different_gateway_on_different_edge(self): with self.router() as r1, self.router() as r2,\ self.network() as ext1, self.network() as ext2,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2,\ self.subnet(network=ext1, cidr='13.0.0.0/24'),\ self.subnet(network=ext2, cidr='14.0.0.0/24'): self._set_net_external(ext1['network']['id']) self._set_net_external(ext2['network']['id']) self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) self._add_external_gateway_to_router( r1['router']['id'], ext1['network']['id']) self._add_external_gateway_to_router( r2['router']['id'], ext1['network']['id']) routers_2 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(2, len(routers_2)) self._add_external_gateway_to_router( r2['router']['id'], ext2['network']['id']) routers_2 = (self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(1, len(routers_2)) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s2['subnet']['id'], None) self._remove_external_gateway_from_router( r1['router']['id'], ext1['network']['id']) self._remove_external_gateway_from_router( r2['router']['id'], ext2['network']['id']) def test_get_available_and_conflicting_ids_with_no_conflict(self): with self.router() as r1, self.router() as r2,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) router_driver = (self.plugin_instance._router_managers. get_tenant_router_driver(context, 'shared')) available_router_ids, conflict_router_ids = ( router_driver._get_available_and_conflicting_ids( context.get_admin_context(), r1['router']['id'])) self.assertIn(r2['router']['id'], available_router_ids) self.assertEqual(0, len(conflict_router_ids)) def test_get_available_and_conflicting_ids_with_conflict(self): with self.router() as r1, self.router() as r2,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='11.0.0.0/24') as s2: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) router_driver = (self.plugin_instance._router_managers. 
get_tenant_router_driver(context, 'shared')) available_router_ids, conflict_router_ids = ( router_driver._get_available_and_conflicting_ids( context.get_admin_context(), r1['router']['id'])) self.assertIn(r2['router']['id'], conflict_router_ids) self.assertEqual(0, len(available_router_ids)) def test_get_available_and_conflicting_ids_with_diff_gw(self): with self.router() as r1, self.router() as r2,\ self.network() as ext1, self.network() as ext2,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2,\ self.subnet(network=ext1, cidr='13.0.0.0/24'),\ self.subnet(network=ext2, cidr='14.0.0.0/24'): self._set_net_external(ext1['network']['id']) self._set_net_external(ext2['network']['id']) self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) self._add_external_gateway_to_router( r1['router']['id'], ext1['network']['id']) self._add_external_gateway_to_router( r2['router']['id'], ext2['network']['id']) router_driver = (self.plugin_instance._router_managers. get_tenant_router_driver(context, 'shared')) available_router_ids, conflict_router_ids = ( router_driver._get_available_and_conflicting_ids( context.get_admin_context(), r1['router']['id'])) self.assertIn(r2['router']['id'], conflict_router_ids) self.assertEqual(0, len(available_router_ids)) def test_get_available_and_conflicting_ids_with_tenants(self): cfg.CONF.set_override('share_edges_between_tenants', False, group="nsxv") with self.router(tenant_id='fake1') as r1,\ self.router(tenant_id='fake2') as r2,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) router_driver = (self.plugin_instance._router_managers. 
get_tenant_router_driver(context, 'shared')) available_router_ids, conflict_router_ids = ( router_driver._get_available_and_conflicting_ids( context.get_admin_context(), r1['router']['id'])) self.assertIn(r2['router']['id'], conflict_router_ids) self.assertEqual(0, len(available_router_ids)) def test_migrate_shared_router_to_exclusive(self): with self.router(name='r7') as r1, \ self.subnet(cidr='11.0.0.0/24') as s1: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) # update the router type: router_id = r1['router']['id'] self._update('routers', router_id, {'router': {'router_type': 'exclusive'}}) # get the updated router and check it's type body = self._show('routers', router_id) self.assertEqual('exclusive', body['router']['router_type']) def _test_create_router_with_az_hint(self, with_hint): # init the availability zones in the plugin az_name = 'az7' set_az_in_config(az_name) p = directory.get_plugin() p._availability_zones_data = nsx_az.NsxVAvailabilityZones() # create a router with/without hints router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 'FAKE_TENANT', 'router_type': 'shared'}} if with_hint: router['router']['availability_zone_hints'] = [az_name] returned_router = p.create_router(context.get_admin_context(), router) # availability zones is still empty because the router is not attached if with_hint: self.assertEqual([az_name], returned_router['availability_zone_hints']) else: self.assertEqual([], returned_router['availability_zone_hints']) self.assertEqual([], returned_router['availability_zones']) # Add interface so the router will be attached to an edge with self.subnet() as s1: router_id = returned_router['id'] self._router_interface_action('add', router_id, s1['subnet']['id'], None) edge_id = edge_utils.get_router_edge_id( context.get_admin_context(), router_id) res_az = nsxv_db.get_edge_availability_zone( context.get_admin_context().session, edge_id) expected_az 
= az_name if with_hint else 'default' self.assertEqual(expected_az, res_az) def test_create_router_with_az_hint(self): self._test_create_router_with_az_hint(True) def test_create_router_without_az_hint(self): self._test_create_router_with_az_hint(False) class TestRouterFlavorTestCase(extension.ExtensionTestCase, test_l3_plugin.L3NatTestCaseMixin, L3NatTest ): FLAVOR_PLUGIN = 'neutron.services.flavors.flavors_plugin.FlavorsPlugin' def _mock_add_flavor_id(dummy, router_res, router_db): # this function is a registered callback so we can't mock it # in a regular way. # need to change behavior for this test suite only, since # there is no "unregister_dict_extend_funcs" if router_res['name'] == 'router_with_flavor': router_res['flavor_id'] = 'raspberry' def setUp(self, plugin=PLUGIN_NAME): # init the core plugin and flavors plugin service_plugins = {plugin_const.FLAVORS: self.FLAVOR_PLUGIN} super(TestRouterFlavorTestCase, self).setUp( plugin=plugin, service_plugins=service_plugins) self.plugin = directory.get_plugin() self.plugin._flv_plugin = directory.get_plugin(plugin_const.FLAVORS) self.plugin._process_router_flavor_create = mock.Mock() self.plugin.register_dict_extend_funcs( l3_apidef.ROUTERS, [self._mock_add_flavor_id]) # init the availability zones self.az_name = 'az7' set_az_in_config(self.az_name) self.plugin._availability_zones_data = ( nsx_az.NsxVAvailabilityZones()) self._iteration = 1 def assertSyslogConfig(self, expected): """Verify syslog was updated in fake driver Test assumes edge ids are created sequentally starting from edge-1 """ edge_id = ('edge-%s' % self._iteration) actual = self.plugin.nsx_v.vcns.get_edge_syslog(edge_id)[1] if not expected: # test expects no syslog to be configured self.assertNotIn('serverAddresses', actual) return self.assertEqual(expected['protocol'], actual['protocol']) self.assertEqual(expected['server_ip'], actual['serverAddresses']['ipAddress'][0]) if 'server2_ip' in expected: self.assertEqual(expected['server2_ip'], 
actual['serverAddresses']['ipAddress'][1]) def _test_router_create_with_flavor( self, metainfo, expected_data, create_type=None, create_size=None, create_az=None): router_data = {'flavor_id': 'dummy', 'tenant_id': 'whatever', 'name': 'router_with_flavor', 'admin_state_up': True} if create_type is not None: router_data['router_type'] = create_type if create_size is not None: router_data['router_size'] = create_size if create_az is not None: router_data['availability_zone_hints'] = [create_az] flavor_data = {'service_type': plugin_const.L3, 'enabled': True, 'service_profiles': ['profile_id']} # Mock the flavors plugin with mock.patch(self.FLAVOR_PLUGIN + '.get_flavor', return_value=flavor_data): with mock.patch(self.FLAVOR_PLUGIN + '.get_service_profile', return_value={'metainfo': metainfo}): router = self.plugin.create_router( context.get_admin_context(), {'router': router_data}) # syslog data is not part of router config # and needs to be validated separately if 'syslog' in expected_data.keys(): self.assertSyslogConfig(expected_data['syslog']) for key, expected_val in expected_data.items(): if key != 'syslog': self.assertEqual(expected_val, router[key]) def test_router_create_with_flavor_different_sizes(self): """Create exclusive router with size in flavor """ for size in ['compact', 'large', 'xlarge', 'quadlarge']: metainfo = "{'router_size':'%s'}" % size expected_router = {'router_type': 'exclusive', 'router_size': size} self._test_router_create_with_flavor( metainfo, expected_router, create_type='exclusive') def test_router_create_with_flavor_ex_different_sizes(self): """Create exclusive router with size and type in flavor """ for size in ['compact', 'large', 'xlarge', 'quadlarge']: metainfo = "{'router_size':'%s','router_type':'exclusive'}" % size expected_router = {'router_type': 'exclusive', 'router_size': size} self._test_router_create_with_flavor( metainfo, expected_router) def test_router_create_with_flavor_az(self): """Create exclusive router with 
availability zone in flavor """ metainfo = "{'availability_zone_hints':'%s'}" % self.az_name expected_router = {'router_type': 'exclusive', 'availability_zone_hints': [self.az_name], 'distributed': False} self._test_router_create_with_flavor( metainfo, expected_router, create_type='exclusive') def test_router_create_with_flavor_shared(self): """Create shared router with availability zone and type in flavor """ metainfo = ("{'availability_zone_hints':'%s'," "'router_type':'shared'}" % self.az_name) expected_router = {'router_type': 'shared', 'availability_zone_hints': [self.az_name], 'distributed': False} self._test_router_create_with_flavor( metainfo, expected_router) def test_router_create_with_flavor_distributed(self): """Create distributed router with availability zone and type in flavor """ metainfo = ("{'availability_zone_hints':'%s'," "'distributed':true}" % self.az_name) expected_router = {'distributed': True, 'availability_zone_hints': [self.az_name]} self._test_router_create_with_flavor( metainfo, expected_router) def test_router_flavor_error_parsing(self): """Use the wrong format for the flavor metainfo It should be ignored, and default values are used """ metainfo = "xxx" expected_router = {'distributed': False, 'router_type': 'shared'} self._test_router_create_with_flavor( metainfo, expected_router) def test_router_create_with_syslog_flavor(self): """Create exclusive router with syslog config in flavor""" # Basic config - server IP only ip = '1.1.1.10' expected_router = {'router_type': 'exclusive', 'syslog': {'protocol': 'tcp', 'server_ip': ip}} metainfo = ("{'router_type':'exclusive'," "'syslog':{'server_ip':'%s'}}" % ip) self._iteration = 1 self._test_router_create_with_flavor( metainfo, expected_router) # Advanced config - secondary server IP, protocol and loglevel ip2 = '1.1.1.11' for protocol in ['tcp', 'udp']: for loglevel in ['none', 'debug', 'info', 'warning', 'error']: expected_router = {'router_type': 'exclusive', 'syslog': {'protocol': 
protocol, 'server_ip': ip, 'server2_ip': ip2}} metainfo = ("{'router_type':'exclusive'," "'syslog':{'server_ip':'%s', 'server2_ip':'%s'," "'protocol':'%s', 'log_level':'%s'}}" % (ip, ip2, protocol, loglevel)) self._iteration += 1 self._test_router_create_with_flavor( metainfo, expected_router) def test_router_create_with_syslog_flavor_error(self): """Create router based on flavor with badly formed syslog metadata Syslog metadata should be ignored """ expected_router = {'router_type': 'exclusive', 'syslog': None} self._iteration = 0 bad_defs = ("'server_ip':'1.1.1.1', 'protocol':'http2'", "'server2_ip':'2.2.2.2'", "'protocol':'tcp'", "'server_ip':'1.1.1.1', 'protocol':'udp','log_level':'pro'", "'log_level':'error'") for meta in bad_defs: metainfo = "{'router_type':'exclusive', 'syslog': {%s}}" % meta self._iteration += 1 self._test_router_create_with_flavor( metainfo, expected_router) def _test_router_create_with_flavor_error( self, metainfo, error_code, create_type=None, create_size=None, create_az=None): router_data = {'flavor_id': 'dummy', 'tenant_id': 'whatever', 'name': 'test_router', 'admin_state_up': True} if create_type is not None: router_data['router_type'] = create_type if create_size is not None: router_data['router_size'] = create_size if create_az is not None: router_data['availability_zone_hints'] = [create_az] flavor_data = {'service_type': plugin_const.L3, 'enabled': True, 'service_profiles': ['profile_id']} # Mock the flavors plugin with mock.patch(self.FLAVOR_PLUGIN + '.get_flavor', return_value=flavor_data): with mock.patch(self.FLAVOR_PLUGIN + '.get_service_profile', return_value={'metainfo': metainfo}): self.assertRaises(error_code, self.plugin.create_router, context.get_admin_context(), {'router': router_data}) def test_router_flavor_size_conflict(self): metainfo = "{'router_size':'large','router_type':'exclusive'}" self._test_router_create_with_flavor_error( metainfo, n_exc.BadRequest, create_size='compact') def 
test_router_flavor_type_conflict(self): metainfo = "{'router_size':'large','router_type':'exclusive'}" self._test_router_create_with_flavor_error( metainfo, n_exc.BadRequest, create_type='shared') def test_router_flavor_az_conflict(self): metainfo = ("{'availability_zone_hints':'%s'," "'distributed':true}" % self.az_name) self._test_router_create_with_flavor_error( metainfo, n_exc.BadRequest, create_az=['az2']) class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt, NsxVPluginV2TestCase): def setUp(self, plugin=None): super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp( plugin=PLUGIN_NAME) def test_create_port_with_extradhcpopts(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def test_create_port_with_extradhcpopts_ipv6_opt_version(self): self.skipTest('No DHCP v6 Support yet') def test_create_port_with_extradhcpopts_ipv4_opt_version(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': 4}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123', 'ip_version': 4}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def test_update_port_with_extradhcpopts_with_same(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] expected_opts = opt_list[:] for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def 
test_update_port_with_additional_extradhcpopt(self): opt_list = [{'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] expected_opts = copy.deepcopy(opt_list) expected_opts.append(upd_opts[0]) self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_extradhcpopts(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] expected_opts = copy.deepcopy(opt_list) for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_extradhcpopt_delete(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}] expected_opts = [] expected_opts = [opt for opt in opt_list if opt['opt_name'] != 'bootfile-name'] self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_adding_extradhcpopts(self): opt_list = [] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] expected_opts = copy.deepcopy(upd_opts) self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_blank_name_extradhcpopt(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] upd_opts = [{'opt_name': ' ', 'opt_value': 'pxelinux.0'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: update_port = {'port': 
{edo_ext.EXTRADHCPOPTS: upd_opts}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_port_with_empty_router_extradhcpopts(self): self.skipTest('No DHCP support option for router') def test_update_port_with_blank_router_extradhcpopt(self): self.skipTest('No DHCP support option for router') def test_update_port_with_extradhcpopts_ipv6_change_value(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_with_extradhcpopts_add_another_ver_opt(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_with_blank_string_extradhcpopt(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': ' '}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_port_with_none_extradhcpopts(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': None}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] expected = [{'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(expected, port['port'][edo_ext.EXTRADHCPOPTS]) def test_create_port_with_extradhcpopts_codes(self): opt_list = [{'opt_name': '85', 'opt_value': 'cafecafe'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def 
test_update_port_with_extradhcpopts_codes(self): opt_list = [{'opt_name': '85', 'opt_value': 'cafecafe'}] upd_opts = [{'opt_name': '85', 'opt_value': '01010101'}] expected_opts = copy.deepcopy(opt_list) for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/test_md_proxy.py0000666000175100017510000003257013244523345024665 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from neutron_lib import context from vmware_nsx.db import nsxv_db from vmware_nsx.db import nsxv_models from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.tests.unit.nsx_v import test_plugin PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin' # Run all relevant plugin tests when the metadata proxy is enabled. # Those tests does not specifically test the md_proxy. just verify that # nothing gets broken. 
class NsxVPluginWithMdV2TestCase(test_plugin.NsxVPluginV2TestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): # Add the metadata configuration cfg.CONF.set_override('mgt_net_moid', 'net-1', group="nsxv") cfg.CONF.set_override('mgt_net_proxy_ips', ['2.2.2.2'], group="nsxv") cfg.CONF.set_override('mgt_net_proxy_netmask', '255.255.255.0', group="nsxv") cfg.CONF.set_override('mgt_net_default_gateway', '1.1.1.1', group="nsxv") cfg.CONF.set_override('nova_metadata_ips', ['3.3.3.3'], group="nsxv") # Add some mocks required for the md code mock_alloc_vnic = mock.patch.object(nsxv_db, 'allocate_edge_vnic') mock_alloc_vnic_inst = mock_alloc_vnic.start() mock_alloc_vnic_inst.return_value = nsxv_models.NsxvEdgeVnicBinding mock.patch.object(edge_utils, "update_internal_interface").start() super(NsxVPluginWithMdV2TestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.context = context.get_admin_context() self.internal_net_id = nsxv_db.get_nsxv_internal_network_for_az( self.context.session, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE, 'default')['network_id'] class TestNetworksWithMdV2(test_plugin.TestNetworksV2, NsxVPluginWithMdV2TestCase): # Skip all the tests that count networks, as there is an # additional internal network for metadata. 
def test_list_networks_with_sort_native(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_networks_without_pk_in_fields_pagination_emulated(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_networks_with_sort_emulated(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_networks_with_shared(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_networks_without_pk_in_fields_pagination_native(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_networks_with_parameters(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_networks_with_pagination_native(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_networks_with_pagination_reverse_emulated(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_networks(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_networks_with_pagination_emulated(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_networks_with_pagination_reverse_native(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_networks_with_fields(self): self.skipTest("The test is not suitable for the metadata test case") def test_create_networks_bulk_wrong_input(self): self.skipTest("The test is not suitable for the metadata test case") def test_create_networks_bulk_native_plugin_failure(self): self.skipTest("The test is not suitable for the metadata test case") def test_create_networks_bulk_native_quotas(self): self.skipTest("The test is not suitable for the metadata test case") def test_create_networks_bulk_emulated_plugin_failure(self): self.skipTest("The test is not suitable for the metadata test case") def test_cannot_delete_md_net(self): req = 
self.new_delete_request('networks', self.internal_net_id) net_del_res = req.get_response(self.api) self.assertEqual(net_del_res.status_int, 400) class TestSubnetsWithMdV2(test_plugin.TestSubnetsV2, NsxVPluginWithMdV2TestCase): # Skip all the tests that count subnets, as there is an # additional internal subnet for metadata. def test_list_subnets_with_sort_native(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_subnets_with_sort_emulated(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_subnets_with_pagination_native(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_subnets_with_parameter(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_subnets_with_pagination_emulated(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_subnets_filtering_by_unknown_filter(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_subnets_shared(self): self.skipTest("The test is not suitable for the metadata test case") def test_list_subnets(self): self.skipTest("The test is not suitable for the metadata test case") def test_create_subnets_bulk_native_plugin_failure(self): self.skipTest("The test is not suitable for the metadata test case") def test_create_subnets_bulk_native_quotas(self): self.skipTest("The test is not suitable for the metadata test case") def test_create_subnets_bulk_emulated_plugin_failure(self): self.skipTest("The test is not suitable for the metadata test case") def test_cannot_delete_md_subnet(self): query_params = "network_id=%s" % self.internal_net_id res = self._list('subnets', neutron_context=self.context, query_params=query_params) internal_sub = res['subnets'][0]['id'] req = self.new_delete_request('subnets', internal_sub) net_del_res = req.get_response(self.api) self.assertEqual(net_del_res.status_int, 400) class 
TestExclusiveRouterWithMdTestCase( test_plugin.TestExclusiveRouterTestCase, NsxVPluginWithMdV2TestCase): # Skip all the tests that count firewall rules, as there are # some MD specific rules def test_router_set_gateway_with_nosnat(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_interfaces_different_tenants_update_firewall(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_interfaces_with_update_firewall(self): self.skipTest("The test is not suitable for the metadata test case") # Skip all the tests that count routers or ports, as there is # an additional router for the md proxy def test_router_list_with_pagination_reverse(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_list_with_sort(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_list_with_pagination(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_list(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_add_interface_delete_port_after_failure(self): self.skipTest("The test is not suitable for the metadata test case") def test_create_router_fail_at_the_backend(self): self.skipTest("The test is not suitable for the metadata test case") def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self): self.skipTest("The test is not suitable for the metadata test case") def test_floatingip_delete_router_intf_with_port_id_returns_409(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_address_scope_snat_rules(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_address_scope_fw_rules(self): self.skipTest("The test is not suitable for the metadata test case") class TestVdrWithMdTestCase(test_plugin.TestVdrTestCase, NsxVPluginWithMdV2TestCase): # Skip all the tests that count firewall 
rules, as there are # some MD specific rules def test_router_set_gateway_with_nosnat(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_interfaces_different_tenants_update_firewall(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_interfaces_with_update_firewall(self): self.skipTest("The test is not suitable for the metadata test case") # Skip all the tests that count routers or ports, as there is # an additional router for the md proxy def test_router_list_with_pagination_reverse(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_list_with_sort(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_list_with_pagination(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_list(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_add_interface_delete_port_after_failure(self): self.skipTest("The test is not suitable for the metadata test case") def test_create_router_fail_at_the_backend(self): self.skipTest("The test is not suitable for the metadata test case") def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self): self.skipTest("The test is not suitable for the metadata test case") def test_floatingip_delete_router_intf_with_port_id_returns_409(self): self.skipTest("The test is not suitable for the metadata test case") #TODO(asarfaty): fix some mocks so those tests will pass def test_router_plr_binding_default_size(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_plr_binding_configured_size(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_plr_binding_default_az(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_plr_binding_with_az(self): self.skipTest("The test is not suitable for the 
metadata test case") class TestSharedRouterWithMdTestCase(test_plugin.TestSharedRouterTestCase, NsxVPluginWithMdV2TestCase): # Skip all the tests that count firewall rules, as there are # some MD specific rules def test_router_set_gateway_with_nosnat(self): self.skipTest("The test is not suitable for the metadata test case") def test_routers_set_gateway_with_nosnat(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_interfaces_different_tenants_update_firewall(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_interfaces_with_update_firewall(self): self.skipTest("The test is not suitable for the metadata test case") # Skip all the tests that count routers or ports, as there is # an additional router for the md proxy def test_router_list_with_pagination_reverse(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_list_with_sort(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_list_with_pagination(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_list(self): self.skipTest("The test is not suitable for the metadata test case") def test_router_add_interface_delete_port_after_failure(self): self.skipTest("The test is not suitable for the metadata test case") def test_create_router_fail_at_the_backend(self): self.skipTest("The test is not suitable for the metadata test case") def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self): self.skipTest("The test is not suitable for the metadata test case") def test_floatingip_delete_router_intf_with_port_id_returns_409(self): self.skipTest("The test is not suitable for the metadata test case") vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsx_v/test_nsxv_loadbalancer.py0000666000175100017510000001107213244523345026503 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_serialization import jsonutils from neutron.tests import base from vmware_nsx.plugins.nsx_v.vshield import nsxv_loadbalancer from vmware_nsx.plugins.nsx_v.vshield import vcns class NsxvLoadbalancerTestCase(base.BaseTestCase): EDGE_OBJ_JSON = ( '{"accelerationEnabled":false,"applicationProfile":[{' '"applicationProfileId":"applicationProfile-1","insertXForwardedFor":' 'false,"name":"MDSrvProxy","persistence":{"cookieMode":"insert",' '"cookieName":"JSESSIONID","expire":"30","method":"cookie"},' '"serverSslEnabled":false,"sslPassthrough":false,"template":"HTTP"}],' '"applicationRule":[],"enableServiceInsertion":false,"enabled":true,' '"featureType":"loadbalancer_4.0","logging":{"enable":false,' '"logLevel":"info"},"monitor":[{"interval":10,"maxRetries":3,"method":' '"GET","monitorId":"monitor-1","name":"MDSrvMon","timeout":15,"type":' '"http","url":"/"}],"pool":[{"algorithm":"round-robin",' '"applicationRuleId":[],"member":[{"condition":"enabled","ipAddress":' '"192.168.0.39","maxConn":0,"memberId":"member-1","minConn":0,' '"monitorPort":8775,"name":"Member-1","port":8775,"weight":1}],' '"monitorId":["monitor-1"],"name":"MDSrvPool","poolId":"pool-1",' '"transparent":false}],"version":6,"virtualServer":[{' '"accelerationEnabled":false,"applicationProfileId":' '"applicationProfile-1","applicationRuleId":[],"connectionLimit":0,' '"defaultPoolId":"pool-1","enableServiceInsertion":false,' 
'"enabled":true,"ipAddress":"169.254.0.3","name":"MdSrv",' '"port":"8775","protocol":"http","virtualServerId":' '"virtualServer-1"}]}') OUT_OBJ_JSON = ( '{"accelerationEnabled": false, "applicationProfile": [{' '"applicationProfileId": "applicationProfile-1", ' '"insertXForwardedFor": false, "name": "MDSrvProxy", "persistence": ' '{"expire": "30", "method": "cookie"}, "serverSslEnabled": false, ' '"sslPassthrough": false, "template": "HTTP"}],' ' "enableServiceInsertion": false, "enabled": true, "featureType": ' '"loadbalancer_4.0", "monitor": [{"interval": 10, "maxRetries": 3, ' '"method": "GET", "monitorId": "monitor-1", "name": "MDSrvMon", ' '"timeout": 15, "type": "http", "url": "/"}], "pool": [{"algorithm":' ' "round-robin", "member": [{"condition": "enabled", "ipAddress": ' '"192.168.0.39", "maxConn": 0, "memberId": "member-1", "minConn": 0, ' '"monitorPort": 8775, "name": "Member-1", "port": 8775, "weight": 1}],' ' "monitorId": ["monitor-1"], "name": "MDSrvPool", "poolId": "pool-1",' ' "transparent": false}], "virtualServer": [{"accelerationEnabled": ' 'false, "applicationProfileId": "applicationProfile-1", ' '"connectionLimit": 0, "defaultPoolId": "pool-1", ' '"enableServiceInsertion": false, "enabled": true, "ipAddress": ' '"169.254.0.3", "name": "MdSrv", "port": "8775", "protocol": ' '"http", "virtualServerId": "virtualServer-1"}]}') LB_URI = '/api/4.0/edges/%s/loadbalancer/config' EDGE_1 = 'edge-x' EDGE_2 = 'edge-y' def setUp(self): super(NsxvLoadbalancerTestCase, self).setUp() self._lb = nsxv_loadbalancer.NsxvLoadbalancer() self._vcns = vcns.Vcns(None, None, None, None, True) def test_get_edge_loadbalancer(self): h = None v = jsonutils.loads(self.EDGE_OBJ_JSON) with mock.patch.object(self._vcns, 'do_request', return_value=(h, v)) as mock_do_request: lb = nsxv_loadbalancer.NsxvLoadbalancer.get_loadbalancer( self._vcns, self.EDGE_1) lb.submit_to_backend(self._vcns, self.EDGE_2) mock_do_request.assert_called_with( vcns.HTTP_PUT, self.LB_URI % self.EDGE_2, 
self.OUT_OBJ_JSON, format='json', encode=False) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsxlib/0000775000175100017510000000000013244524600021537 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsxlib/__init__.py0000666000175100017510000000000013244523345023645 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsxlib/mh/0000775000175100017510000000000013244524600022143 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsxlib/mh/test_switch.py0000666000175100017510000003475313244523345025100 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import hashlib import mock from neutron.tests.unit.api.v2 import test_base from neutron_lib import constants from neutron_lib import exceptions from vmware_nsx.common import utils from vmware_nsx.nsxlib.mh import switch as switchlib from vmware_nsx.tests.unit.nsxlib.mh import base _uuid = test_base._uuid class LogicalSwitchesTestCase(base.NsxlibTestCase): def test_create_and_get_lswitches_single(self): tenant_id = 'pippo' transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] lswitch = switchlib.create_lswitch(self.fake_cluster, _uuid(), tenant_id, 'fake-switch', transport_zones_config) res_lswitch = switchlib.get_lswitches(self.fake_cluster, lswitch['uuid']) self.assertEqual(len(res_lswitch), 1) self.assertEqual(res_lswitch[0]['uuid'], lswitch['uuid']) def test_create_and_get_lswitches_single_name_exceeds_40_chars(self): tenant_id = 'pippo' transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] lswitch = switchlib.create_lswitch(self.fake_cluster, tenant_id, _uuid(), '*' * 50, transport_zones_config) res_lswitch = switchlib.get_lswitches(self.fake_cluster, lswitch['uuid']) self.assertEqual(len(res_lswitch), 1) self.assertEqual(res_lswitch[0]['uuid'], lswitch['uuid']) self.assertEqual(res_lswitch[0]['display_name'], '*' * 40) def test_create_and_get_lswitches_multiple(self): tenant_id = 'pippo' transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] network_id = _uuid() main_lswitch = switchlib.create_lswitch( self.fake_cluster, network_id, tenant_id, 'fake-switch', transport_zones_config, tags=[{'scope': 'multi_lswitch', 'tag': 'True'}]) # Create secondary lswitch second_lswitch = switchlib.create_lswitch( self.fake_cluster, network_id, tenant_id, 'fake-switch-2', transport_zones_config) res_lswitch = switchlib.get_lswitches(self.fake_cluster, network_id) self.assertEqual(len(res_lswitch), 2) switch_uuids = [ls['uuid'] for ls in res_lswitch] self.assertIn(main_lswitch['uuid'], switch_uuids) 
self.assertIn(second_lswitch['uuid'], switch_uuids) for ls in res_lswitch: if ls['uuid'] == main_lswitch['uuid']: main_ls = ls else: second_ls = ls main_ls_tags = self._build_tag_dict(main_ls['tags']) second_ls_tags = self._build_tag_dict(second_ls['tags']) self.assertIn('multi_lswitch', main_ls_tags) self.assertNotIn('multi_lswitch', second_ls_tags) self.assertIn('quantum_net_id', main_ls_tags) self.assertIn('quantum_net_id', second_ls_tags) self.assertEqual(main_ls_tags['quantum_net_id'], network_id) self.assertEqual(second_ls_tags['quantum_net_id'], network_id) def _test_update_lswitch(self, tenant_id, name, tags): transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] lswitch = switchlib.create_lswitch(self.fake_cluster, _uuid(), 'pippo', 'fake-switch', transport_zones_config) switchlib.update_lswitch(self.fake_cluster, lswitch['uuid'], name, tenant_id=tenant_id, tags=tags) res_lswitch = switchlib.get_lswitches(self.fake_cluster, lswitch['uuid']) self.assertEqual(len(res_lswitch), 1) self.assertEqual(res_lswitch[0]['display_name'], name) if not tags: # no need to validate tags return switch_tags = self._build_tag_dict(res_lswitch[0]['tags']) for tag in tags: self.assertIn(tag['scope'], switch_tags) self.assertEqual(tag['tag'], switch_tags[tag['scope']]) def test_update_lswitch(self): self._test_update_lswitch(None, 'new-name', [{'scope': 'new_tag', 'tag': 'xxx'}]) def test_update_lswitch_no_tags(self): self._test_update_lswitch(None, 'new-name', None) def test_update_lswitch_tenant_id(self): self._test_update_lswitch('whatever', 'new-name', None) def test_update_non_existing_lswitch_raises(self): self.assertRaises(exceptions.NetworkNotFound, switchlib.update_lswitch, self.fake_cluster, 'whatever', 'foo', 'bar') def test_delete_networks(self): transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] lswitch = switchlib.create_lswitch(self.fake_cluster, _uuid(), 'pippo', 'fake-switch', transport_zones_config) 
switchlib.delete_networks(self.fake_cluster, lswitch['uuid'], [lswitch['uuid']]) self.assertRaises(exceptions.NotFound, switchlib.get_lswitches, self.fake_cluster, lswitch['uuid']) def test_delete_non_existing_lswitch_raises(self): self.assertRaises(exceptions.NetworkNotFound, switchlib.delete_networks, self.fake_cluster, 'whatever', ['whatever']) class LogicalPortsTestCase(base.NsxlibTestCase): def _create_switch_and_port(self, tenant_id='pippo', neutron_port_id='whatever', name='name', device_id='device_id'): transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] lswitch = switchlib.create_lswitch(self.fake_cluster, _uuid(), tenant_id, 'fake-switch', transport_zones_config) lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'], tenant_id, neutron_port_id, name, device_id, True) return lswitch, lport def test_create_and_get_port(self): lswitch, lport = self._create_switch_and_port() lport_res = switchlib.get_port(self.fake_cluster, lswitch['uuid'], lport['uuid']) self.assertEqual(lport['uuid'], lport_res['uuid']) # Try again with relation lport_res = switchlib.get_port(self.fake_cluster, lswitch['uuid'], lport['uuid'], relations='LogicalPortStatus') self.assertEqual(lport['uuid'], lport_res['uuid']) def test_plug_interface(self): lswitch, lport = self._create_switch_and_port() switchlib.plug_vif_interface(self.fake_cluster, lswitch['uuid'], lport['uuid'], 'VifAttachment', 'fake') lport_res = switchlib.get_port(self.fake_cluster, lswitch['uuid'], lport['uuid']) self.assertEqual(lport['uuid'], lport_res['uuid']) def test_get_port_by_tag(self): lswitch, lport = self._create_switch_and_port() lport2 = switchlib.get_port_by_neutron_tag(self.fake_cluster, lswitch['uuid'], 'whatever') self.assertIsNotNone(lport2) self.assertEqual(lport['uuid'], lport2['uuid']) def test_get_port_by_tag_not_found_with_switch_id_raises_not_found(self): tenant_id = 'pippo' neutron_port_id = 'whatever' transport_zones_config = [{'zone_uuid': _uuid(), 
'transport_type': 'stt'}] lswitch = switchlib.create_lswitch( self.fake_cluster, tenant_id, _uuid(), 'fake-switch', transport_zones_config) self.assertRaises(exceptions.NotFound, switchlib.get_port_by_neutron_tag, self.fake_cluster, lswitch['uuid'], neutron_port_id) def test_get_port_by_tag_not_find_wildcard_lswitch_returns_none(self): tenant_id = 'pippo' neutron_port_id = 'whatever' transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] switchlib.create_lswitch( self.fake_cluster, tenant_id, _uuid(), 'fake-switch', transport_zones_config) lport = switchlib.get_port_by_neutron_tag( self.fake_cluster, '*', neutron_port_id) self.assertIsNone(lport) def test_get_port_status(self): lswitch, lport = self._create_switch_and_port() status = switchlib.get_port_status( self.fake_cluster, lswitch['uuid'], lport['uuid']) self.assertEqual(constants.PORT_STATUS_ACTIVE, status) def test_get_port_status_non_existent_raises(self): self.assertRaises(exceptions.PortNotFoundOnNetwork, switchlib.get_port_status, self.fake_cluster, 'boo', 'boo') def test_update_port(self): lswitch, lport = self._create_switch_and_port() switchlib.update_port( self.fake_cluster, lswitch['uuid'], lport['uuid'], 'neutron_port_id', 'pippo2', 'new_name', 'device_id', False) lport_res = switchlib.get_port(self.fake_cluster, lswitch['uuid'], lport['uuid']) self.assertEqual(lport['uuid'], lport_res['uuid']) self.assertEqual('new_name', lport_res['display_name']) self.assertEqual('False', lport_res['admin_status_enabled']) port_tags = self._build_tag_dict(lport_res['tags']) self.assertIn('os_tid', port_tags) self.assertIn('q_port_id', port_tags) self.assertIn('vm_id', port_tags) def test_create_port_device_id_less_than_40_chars(self): lswitch, lport = self._create_switch_and_port() lport_res = switchlib.get_port(self.fake_cluster, lswitch['uuid'], lport['uuid']) port_tags = self._build_tag_dict(lport_res['tags']) self.assertEqual('device_id', port_tags['vm_id']) def 
test_create_port_device_id_more_than_40_chars(self): dev_id = "this_is_a_very_long_device_id_with_lots_of_characters" lswitch, lport = self._create_switch_and_port(device_id=dev_id) lport_res = switchlib.get_port(self.fake_cluster, lswitch['uuid'], lport['uuid']) port_tags = self._build_tag_dict(lport_res['tags']) self.assertNotEqual(len(dev_id), len(port_tags['vm_id'])) def test_get_ports_with_obsolete_and_new_vm_id_tag(self): def obsolete(device_id, obfuscate=False): return hashlib.sha1(device_id.encode()).hexdigest() with mock.patch.object(utils, 'device_id_to_vm_id', new=obsolete): dev_id1 = "short-dev-id-1" _, lport1 = self._create_switch_and_port(device_id=dev_id1) dev_id2 = "short-dev-id-2" _, lport2 = self._create_switch_and_port(device_id=dev_id2) lports = switchlib.get_ports(self.fake_cluster, None, [dev_id1]) port_tags = self._build_tag_dict(lports['whatever']['tags']) self.assertNotEqual(dev_id1, port_tags['vm_id']) lports = switchlib.get_ports(self.fake_cluster, None, [dev_id2]) port_tags = self._build_tag_dict(lports['whatever']['tags']) self.assertEqual(dev_id2, port_tags['vm_id']) def test_update_non_existent_port_raises(self): self.assertRaises(exceptions.PortNotFoundOnNetwork, switchlib.update_port, self.fake_cluster, 'boo', 'boo', 'boo', 'boo', 'boo', 'boo', False) def test_delete_port(self): lswitch, lport = self._create_switch_and_port() switchlib.delete_port(self.fake_cluster, lswitch['uuid'], lport['uuid']) self.assertRaises(exceptions.PortNotFoundOnNetwork, switchlib.get_port, self.fake_cluster, lswitch['uuid'], lport['uuid']) def test_delete_non_existent_port_raises(self): lswitch = self._create_switch_and_port()[0] self.assertRaises(exceptions.PortNotFoundOnNetwork, switchlib.delete_port, self.fake_cluster, lswitch['uuid'], 'bad_port_uuid') def test_query_lswitch_ports(self): lswitch, lport = self._create_switch_and_port() switch_port_uuids = [ switchlib.create_lport( self.fake_cluster, lswitch['uuid'], 'pippo', 'qportid-%s' % k, 'port-%s' 
% k, 'deviceid-%s' % k, True)['uuid'] for k in range(2)] switch_port_uuids.append(lport['uuid']) ports = switchlib.query_lswitch_lports( self.fake_cluster, lswitch['uuid']) self.assertEqual(len(ports), 3) for res_port in ports: self.assertIn(res_port['uuid'], switch_port_uuids) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsxlib/mh/test_l2gateway.py0000666000175100017510000003311413244523345025464 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import mock from neutron.tests.unit.api.v2 import test_base from oslo_serialization import jsonutils from vmware_nsx.api_client import exception from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils as nsx_utils from vmware_nsx.nsxlib import mh as nsxlib from vmware_nsx.nsxlib.mh import l2gateway as l2gwlib from vmware_nsx.nsxlib.mh import switch as switchlib from vmware_nsx.tests.unit.nsxlib.mh import base _uuid = test_base._uuid class L2GatewayNegativeTestCase(base.NsxlibNegativeBaseTestCase): def test_create_l2_gw_service_on_failure(self): self.assertRaises(exception.NsxApiException, l2gwlib.create_l2_gw_service, self.fake_cluster, 'fake-tenant', 'fake-gateway', [{'id': _uuid(), 'interface_name': 'xxx'}]) def test_delete_l2_gw_service_on_failure(self): self.assertRaises(exception.NsxApiException, l2gwlib.delete_l2_gw_service, self.fake_cluster, 'fake-gateway') def test_get_l2_gw_service_on_failure(self): self.assertRaises(exception.NsxApiException, l2gwlib.get_l2_gw_service, self.fake_cluster, 'fake-gateway') def test_update_l2_gw_service_on_failure(self): self.assertRaises(exception.NsxApiException, l2gwlib.update_l2_gw_service, self.fake_cluster, 'fake-gateway', 'pluto') class L2GatewayTestCase(base.NsxlibTestCase): def _create_gw_service(self, node_uuid, display_name, tenant_id='fake_tenant'): return l2gwlib.create_l2_gw_service(self.fake_cluster, tenant_id, display_name, [{'id': node_uuid, 'interface_name': 'xxx'}]) def test_create_l2_gw_service(self): display_name = 'fake-gateway' node_uuid = _uuid() response = self._create_gw_service(node_uuid, display_name) self.assertEqual(response.get('type'), 'L2GatewayServiceConfig') self.assertEqual(response.get('display_name'), display_name) gateways = response.get('gateways', []) self.assertEqual(len(gateways), 1) self.assertEqual(gateways[0]['type'], 'L2Gateway') self.assertEqual(gateways[0]['device_id'], 'xxx') self.assertEqual(gateways[0]['transport_node_uuid'], node_uuid) def 
test_update_l2_gw_service(self): display_name = 'fake-gateway' new_display_name = 'still-fake-gateway' node_uuid = _uuid() res1 = self._create_gw_service(node_uuid, display_name) gw_id = res1['uuid'] res2 = l2gwlib.update_l2_gw_service( self.fake_cluster, gw_id, new_display_name) self.assertEqual(res2['display_name'], new_display_name) def test_get_l2_gw_service(self): display_name = 'fake-gateway' node_uuid = _uuid() gw_id = self._create_gw_service(node_uuid, display_name)['uuid'] response = l2gwlib.get_l2_gw_service(self.fake_cluster, gw_id) self.assertEqual(response.get('type'), 'L2GatewayServiceConfig') self.assertEqual(response.get('display_name'), display_name) self.assertEqual(response.get('uuid'), gw_id) def test_list_l2_gw_service(self): gw_ids = [] for name in ('fake-1', 'fake-2'): gw_ids.append(self._create_gw_service(_uuid(), name)['uuid']) results = l2gwlib.get_l2_gw_services(self.fake_cluster) self.assertEqual(len(results), 2) self.assertEqual(sorted(gw_ids), sorted([r['uuid'] for r in results])) def test_list_l2_gw_service_by_tenant(self): gw_ids = [self._create_gw_service( _uuid(), name, tenant_id=name)['uuid'] for name in ('fake-1', 'fake-2')] results = l2gwlib.get_l2_gw_services(self.fake_cluster, tenant_id='fake-1') self.assertEqual(len(results), 1) self.assertEqual(results[0]['uuid'], gw_ids[0]) def test_delete_l2_gw_service(self): display_name = 'fake-gateway' node_uuid = _uuid() gw_id = self._create_gw_service(node_uuid, display_name)['uuid'] l2gwlib.delete_l2_gw_service(self.fake_cluster, gw_id) results = l2gwlib.get_l2_gw_services(self.fake_cluster) self.assertEqual(len(results), 0) def test_plug_l2_gw_port_attachment(self): tenant_id = 'pippo' node_uuid = _uuid() transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] lswitch = switchlib.create_lswitch( self.fake_cluster, _uuid(), tenant_id, 'fake-switch', transport_zones_config) gw_id = self._create_gw_service(node_uuid, 'fake-gw')['uuid'] lport = switchlib.create_lport( 
self.fake_cluster, lswitch['uuid'], tenant_id, _uuid(), 'fake-gw-port', gw_id, True) l2gwlib.plug_l2_gw_service( self.fake_cluster, lswitch['uuid'], lport['uuid'], gw_id) uri = nsxlib._build_uri_path(switchlib.LSWITCHPORT_RESOURCE, lport['uuid'], lswitch['uuid'], is_attachment=True) resp_obj = nsxlib.do_request("GET", uri, cluster=self.fake_cluster) self.assertIn('LogicalPortAttachment', resp_obj) self.assertEqual(resp_obj['LogicalPortAttachment']['type'], 'L2GatewayAttachment') def _create_expected_req_body(self, display_name, neutron_id, connector_type, connector_ip, client_certificate): body = { "display_name": display_name, "tags": [{"tag": neutron_id, "scope": "q_gw_dev_id"}, {"tag": 'fake_tenant', "scope": "os_tid"}, {"tag": nsx_utils.NEUTRON_VERSION, "scope": "quantum"}], "transport_connectors": [ {"transport_zone_uuid": 'fake_tz_uuid', "ip_address": connector_ip, "type": '%sConnector' % connector_type}], "admin_status_enabled": True } body.get("tags").sort(key=lambda x: x['tag']) if client_certificate: body["credential"] = { "client_certificate": { "pem_encoded": client_certificate}, "type": "SecurityCertificateCredential"} return body def test_create_gw_device(self): # NOTE(salv-orlando): This unit test mocks backend calls rather than # leveraging the fake NSX API client display_name = 'fake-device' neutron_id = 'whatever' connector_type = 'stt' connector_ip = '1.1.1.1' client_certificate = 'this_should_be_a_certificate' with mock.patch.object(nsxlib, 'do_request') as request_mock: expected_req_body = self._create_expected_req_body( display_name, neutron_id, connector_type.upper(), connector_ip, client_certificate) l2gwlib.create_gateway_device( self.fake_cluster, 'fake_tenant', display_name, neutron_id, 'fake_tz_uuid', connector_type, connector_ip, client_certificate) request_mock.assert_called_once_with( "POST", "/ws.v1/transport-node", jsonutils.dumps(expected_req_body, sort_keys=True), cluster=self.fake_cluster) def 
test_create_gw_device_with_invalid_transport_type_raises(self): display_name = 'fake-device' neutron_id = 'whatever' connector_type = 'foo' connector_ip = '1.1.1.1' client_certificate = 'this_should_be_a_certificate' self.assertRaises(nsx_exc.InvalidTransportType, l2gwlib.create_gateway_device, self.fake_cluster, 'fake_tenant', display_name, neutron_id, 'fake_tz_uuid', connector_type, connector_ip, client_certificate) def test_update_gw_device(self): # NOTE(salv-orlando): This unit test mocks backend calls rather than # leveraging the fake NSX API client display_name = 'fake-device' neutron_id = 'whatever' connector_type = 'stt' connector_ip = '1.1.1.1' client_certificate = 'this_should_be_a_certificate' with mock.patch.object(nsxlib, 'do_request') as request_mock: expected_req_body = self._create_expected_req_body( display_name, neutron_id, connector_type.upper(), connector_ip, client_certificate) l2gwlib.update_gateway_device( self.fake_cluster, 'whatever', 'fake_tenant', display_name, neutron_id, 'fake_tz_uuid', connector_type, connector_ip, client_certificate) request_mock.assert_called_once_with( "PUT", "/ws.v1/transport-node/whatever", jsonutils.dumps(expected_req_body, sort_keys=True), cluster=self.fake_cluster) def test_update_gw_device_without_certificate(self): # NOTE(salv-orlando): This unit test mocks backend calls rather than # leveraging the fake NSX API client display_name = 'fake-device' neutron_id = 'whatever' connector_type = 'stt' connector_ip = '1.1.1.1' with mock.patch.object(nsxlib, 'do_request') as request_mock: expected_req_body = self._create_expected_req_body( display_name, neutron_id, connector_type.upper(), connector_ip, None) l2gwlib.update_gateway_device( self.fake_cluster, 'whatever', 'fake_tenant', display_name, neutron_id, 'fake_tz_uuid', connector_type, connector_ip, client_certificate=None) request_mock.assert_called_once_with( "PUT", "/ws.v1/transport-node/whatever", jsonutils.dumps(expected_req_body, sort_keys=True), 
cluster=self.fake_cluster) def test_get_gw_device_status(self): # NOTE(salv-orlando): This unit test mocks backend calls rather than # leveraging the fake NSX API client with mock.patch.object(nsxlib, 'do_request') as request_mock: l2gwlib.get_gateway_device_status(self.fake_cluster, 'whatever') request_mock.assert_called_once_with( "GET", "/ws.v1/transport-node/whatever/status", cluster=self.fake_cluster) def test_get_gw_devices_status(self): # NOTE(salv-orlando): This unit test mocks backend calls rather than # leveraging the fake NSX API client with mock.patch.object(nsxlib, 'do_request') as request_mock: request_mock.return_value = { 'results': [], 'page_cursor': None, 'result_count': 0} l2gwlib.get_gateway_devices_status(self.fake_cluster) request_mock.assert_called_once_with( "GET", ("/ws.v1/transport-node?fields=uuid,tags&" "relations=TransportNodeStatus&" "_page_length=1000&tag_scope=quantum"), cluster=self.fake_cluster) def test_get_gw_devices_status_filter_by_tenant(self): # NOTE(salv-orlando): This unit test mocks backend calls rather than # leveraging the fake NSX API client with mock.patch.object(nsxlib, 'do_request') as request_mock: request_mock.return_value = { 'results': [], 'page_cursor': None, 'result_count': 0} l2gwlib.get_gateway_devices_status(self.fake_cluster, tenant_id='ssc_napoli') request_mock.assert_called_once_with( "GET", ("/ws.v1/transport-node?fields=uuid,tags&" "relations=TransportNodeStatus&" "tag=ssc_napoli&tag_scope=os_tid&" "_page_length=1000&tag_scope=quantum"), cluster=self.fake_cluster) def test_delete_gw_device(self): # NOTE(salv-orlando): This unit test mocks backend calls rather than # leveraging the fake NSX API client with mock.patch.object(nsxlib, 'do_request') as request_mock: l2gwlib.delete_gateway_device(self.fake_cluster, 'whatever') request_mock.assert_called_once_with( "DELETE", "/ws.v1/transport-node/whatever", cluster=self.fake_cluster) 
vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsxlib/mh/test_router.py0000666000175100017510000013321613244523345025111 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import mock from neutron.tests.unit.api.v2 import test_base from neutron_lib import exceptions from oslo_config import cfg from oslo_utils import uuidutils from vmware_nsx.api_client import exception as api_exc from vmware_nsx.api_client import version as ver_module from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils from vmware_nsx.nsxlib import mh as nsxlib from vmware_nsx.nsxlib.mh import router as routerlib from vmware_nsx.nsxlib.mh import switch as switchlib from vmware_nsx.tests.unit.nsxlib.mh import base _uuid = test_base._uuid class TestNatRules(base.NsxlibTestCase): def _test_create_lrouter_dnat_rule(self, version): with mock.patch.object(self.fake_cluster.api_client, 'get_version', new=lambda: ver_module.Version(version)): tenant_id = 'pippo' lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), tenant_id, 'fake_router', '192.168.0.1') nat_rule = routerlib.create_lrouter_dnat_rule( self.fake_cluster, lrouter['uuid'], '10.0.0.99', match_criteria={'destination_ip_addresses': '192.168.0.5'}) uri = nsxlib._build_uri_path(routerlib.LROUTERNAT_RESOURCE, nat_rule['uuid'], lrouter['uuid']) resp_obj = nsxlib.do_request("GET", uri, cluster=self.fake_cluster) 
self.assertEqual('DestinationNatRule', resp_obj['type']) self.assertEqual('192.168.0.5', resp_obj['match']['destination_ip_addresses']) def test_create_lrouter_dnat_rule_v2(self): self._test_create_lrouter_dnat_rule('2.9') def test_create_lrouter_dnat_rule_v31(self): self._test_create_lrouter_dnat_rule('3.1') class TestExplicitLRouters(base.NsxlibTestCase): def setUp(self): self.fake_version = '3.2' super(TestExplicitLRouters, self).setUp() def _get_lrouter(self, tenant_id, router_name, router_id, relations=None): schema = '/ws.v1/schema/RoutingTableRoutingConfig' router = {'display_name': router_name, 'uuid': router_id, 'tags': utils.get_tags(os_tid=tenant_id), 'distributed': False, 'routing_config': {'type': 'RoutingTableRoutingConfig', '_schema': schema}, '_schema': schema, 'nat_synchronization_enabled': True, 'replication_mode': 'service', 'type': 'LogicalRouterConfig', '_href': '/ws.v1/lrouter/%s' % router_id, } if relations: router['_relations'] = relations return router def _get_single_route(self, router_id, route_id='fake_route_id_0', prefix='0.0.0.0/0', next_hop_ip='1.1.1.1'): return {'protocol': 'static', '_href': '/ws.v1/lrouter/%s/rib/%s' % (router_id, route_id), 'prefix': prefix, '_schema': '/ws.v1/schema/RoutingTableEntry', 'next_hop_ip': next_hop_ip, 'action': 'accept', 'uuid': route_id} def test_prepare_body_with_implicit_routing_config(self): router_name = 'fake_router_name' tenant_id = 'fake_tenant_id' neutron_router_id = 'pipita_higuain' router_type = 'SingleDefaultRouteImplicitRoutingConfig' route_config = { 'default_route_next_hop': {'gateway_ip_address': 'fake_address', 'type': 'RouterNextHop'}, } body = routerlib._prepare_lrouter_body(router_name, neutron_router_id, tenant_id, router_type, **route_config) expected = {'display_name': 'fake_router_name', 'routing_config': { 'default_route_next_hop': {'gateway_ip_address': 'fake_address', 'type': 'RouterNextHop'}, 'type': 'SingleDefaultRouteImplicitRoutingConfig'}, 'tags': 
utils.get_tags(os_tid='fake_tenant_id', q_router_id='pipita_higuain'), 'type': 'LogicalRouterConfig', 'replication_mode': cfg.CONF.NSX.replication_mode} self.assertEqual(expected, body) def test_prepare_body_without_routing_config(self): router_name = 'fake_router_name' tenant_id = 'fake_tenant_id' neutron_router_id = 'marekiaro_hamsik' router_type = 'RoutingTableRoutingConfig' body = routerlib._prepare_lrouter_body(router_name, neutron_router_id, tenant_id, router_type) expected = {'display_name': 'fake_router_name', 'routing_config': {'type': 'RoutingTableRoutingConfig'}, 'tags': utils.get_tags(os_tid='fake_tenant_id', q_router_id='marekiaro_hamsik'), 'type': 'LogicalRouterConfig', 'replication_mode': cfg.CONF.NSX.replication_mode} self.assertEqual(expected, body) def test_get_lrouter(self): tenant_id = 'fake_tenant_id' router_name = 'fake_router_name' router_id = 'fake_router_id' relations = { 'LogicalRouterStatus': {'_href': '/ws.v1/lrouter/%s/status' % router_id, 'lport_admin_up_count': 1, '_schema': '/ws.v1/schema/LogicalRouterStatus', 'lport_count': 1, 'fabric_status': True, 'type': 'LogicalRouterStatus', 'lport_link_up_count': 0, }, } with mock.patch.object(nsxlib, 'do_request', return_value=self._get_lrouter(tenant_id, router_name, router_id, relations)): lrouter = routerlib.get_lrouter(self.fake_cluster, router_id) self.assertTrue( lrouter['_relations']['LogicalRouterStatus']['fabric_status']) def test_create_lrouter(self): tenant_id = 'fake_tenant_id' router_name = 'fake_router_name' router_id = 'fake_router_id' nexthop_ip = '10.0.0.1' with mock.patch.object( nsxlib, 'do_request', return_value=self._get_lrouter(tenant_id, router_name, router_id)): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), tenant_id, router_name, nexthop_ip) self.assertEqual(lrouter['routing_config']['type'], 'RoutingTableRoutingConfig') self.assertNotIn('default_route_next_hop', lrouter['routing_config']) def 
test_update_lrouter_with_no_routes(self): router_id = 'fake_router_id' new_routes = [{"nexthop": "10.0.0.2", "destination": "169.254.169.0/30"}, ] nsx_routes = [self._get_single_route(router_id)] with mock.patch.object(routerlib, 'get_explicit_routes_lrouter', return_value=nsx_routes): with mock.patch.object(routerlib, 'create_explicit_route_lrouter', return_value='fake_uuid'): old_routes = routerlib.update_explicit_routes_lrouter( self.fake_cluster, router_id, new_routes) self.assertEqual(old_routes, nsx_routes) def test_update_lrouter_with_no_routes_raise_nsx_exception(self): router_id = 'fake_router_id' new_routes = [{"nexthop": "10.0.0.2", "destination": "169.254.169.0/30"}, ] nsx_routes = [self._get_single_route(router_id)] with mock.patch.object(routerlib, 'get_explicit_routes_lrouter', return_value=nsx_routes): with mock.patch.object(routerlib, 'create_explicit_route_lrouter', side_effect=api_exc.NsxApiException): self.assertRaises(api_exc.NsxApiException, routerlib.update_explicit_routes_lrouter, self.fake_cluster, router_id, new_routes) def test_update_lrouter_with_routes(self): router_id = 'fake_router_id' new_routes = [{"next_hop_ip": "10.0.0.2", "prefix": "169.254.169.0/30"}, ] nsx_routes = [self._get_single_route(router_id), self._get_single_route(router_id, 'fake_route_id_1', '0.0.0.1/24', '10.0.0.3'), self._get_single_route(router_id, 'fake_route_id_2', '0.0.0.2/24', '10.0.0.4'), ] with mock.patch.object(routerlib, 'get_explicit_routes_lrouter', return_value=nsx_routes): with mock.patch.object(routerlib, 'delete_explicit_route_lrouter', return_value=None): with mock.patch.object(routerlib, 'create_explicit_route_lrouter', return_value='fake_uuid'): old_routes = routerlib.update_explicit_routes_lrouter( self.fake_cluster, router_id, new_routes) self.assertEqual(old_routes, nsx_routes) def test_update_lrouter_with_routes_raises_nsx_expception(self): router_id = 'fake_router_id' new_routes = [{"nexthop": "10.0.0.2", "destination": "169.254.169.0/30"}, ] 
nsx_routes = [self._get_single_route(router_id), self._get_single_route(router_id, 'fake_route_id_1', '0.0.0.1/24', '10.0.0.3'), self._get_single_route(router_id, 'fake_route_id_2', '0.0.0.2/24', '10.0.0.4'), ] with mock.patch.object(routerlib, 'get_explicit_routes_lrouter', return_value=nsx_routes): with mock.patch.object(routerlib, 'delete_explicit_route_lrouter', side_effect=api_exc.NsxApiException): with mock.patch.object( routerlib, 'create_explicit_route_lrouter', return_value='fake_uuid'): self.assertRaises( api_exc.NsxApiException, routerlib.update_explicit_routes_lrouter, self.fake_cluster, router_id, new_routes) class RouterNegativeTestCase(base.NsxlibNegativeBaseTestCase): def test_create_lrouter_on_failure(self): self.assertRaises(api_exc.NsxApiException, routerlib.create_lrouter, self.fake_cluster, uuidutils.generate_uuid(), 'pluto', 'fake_router', 'my_hop') def test_delete_lrouter_on_failure(self): self.assertRaises(api_exc.NsxApiException, routerlib.delete_lrouter, self.fake_cluster, 'fake_router') def test_get_lrouter_on_failure(self): self.assertRaises(api_exc.NsxApiException, routerlib.get_lrouter, self.fake_cluster, 'fake_router') def test_update_lrouter_on_failure(self): self.assertRaises(api_exc.NsxApiException, routerlib.update_lrouter, self.fake_cluster, 'fake_router', 'pluto', 'new_hop') class TestLogicalRouters(base.NsxlibTestCase): def _verify_lrouter(self, res_lrouter, expected_uuid, expected_display_name, expected_nexthop, expected_tenant_id, expected_neutron_id=None, expected_distributed=None): self.assertEqual(res_lrouter['uuid'], expected_uuid) nexthop = (res_lrouter['routing_config'] ['default_route_next_hop']['gateway_ip_address']) self.assertEqual(nexthop, expected_nexthop) router_tags = self._build_tag_dict(res_lrouter['tags']) self.assertIn('os_tid', router_tags) self.assertEqual(res_lrouter['display_name'], expected_display_name) self.assertEqual(expected_tenant_id, router_tags['os_tid']) if expected_distributed is not None: 
self.assertEqual(expected_distributed, res_lrouter['distributed']) if expected_neutron_id: self.assertIn('q_router_id', router_tags) self.assertEqual(expected_neutron_id, router_tags['q_router_id']) def test_get_lrouters(self): lrouter_uuids = [routerlib.create_lrouter( self.fake_cluster, 'whatever', 'pippo', 'fake-lrouter-%s' % k, '10.0.0.1')['uuid'] for k in range(3)] routers = routerlib.get_lrouters(self.fake_cluster, 'pippo') for router in routers: self.assertIn(router['uuid'], lrouter_uuids) def _create_lrouter(self, version, neutron_id=None, distributed=None): with mock.patch.object( self.fake_cluster.api_client, 'get_version', return_value=ver_module.Version(version)): if not neutron_id: neutron_id = uuidutils.generate_uuid() lrouter = routerlib.create_lrouter( self.fake_cluster, neutron_id, 'pippo', 'fake-lrouter', '10.0.0.1', distributed=distributed) return routerlib.get_lrouter(self.fake_cluster, lrouter['uuid']) def test_create_and_get_lrouter_v30(self): neutron_id = uuidutils.generate_uuid() res_lrouter = self._create_lrouter('3.0', neutron_id=neutron_id) self._verify_lrouter(res_lrouter, res_lrouter['uuid'], 'fake-lrouter', '10.0.0.1', 'pippo', expected_neutron_id=neutron_id) def test_create_and_get_lrouter_v31_centralized(self): neutron_id = uuidutils.generate_uuid() res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id, distributed=False) self._verify_lrouter(res_lrouter, res_lrouter['uuid'], 'fake-lrouter', '10.0.0.1', 'pippo', expected_neutron_id=neutron_id, expected_distributed=False) def test_create_and_get_lrouter_v31_distributed(self): neutron_id = uuidutils.generate_uuid() res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id, distributed=True) self._verify_lrouter(res_lrouter, res_lrouter['uuid'], 'fake-lrouter', '10.0.0.1', 'pippo', expected_neutron_id=neutron_id, expected_distributed=True) def test_create_and_get_lrouter_name_exceeds_40chars(self): neutron_id = uuidutils.generate_uuid() display_name = '*' * 50 lrouter = 
routerlib.create_lrouter(self.fake_cluster, neutron_id, 'pippo', display_name, '10.0.0.1') res_lrouter = routerlib.get_lrouter(self.fake_cluster, lrouter['uuid']) self._verify_lrouter(res_lrouter, lrouter['uuid'], '*' * 40, '10.0.0.1', 'pippo', expected_neutron_id=neutron_id) def _test_version_dependent_update_lrouter(self, version): def foo(*args, **kwargs): return version foo_func_dict = { 'update_lrouter': { 2: {-1: foo}, 3: {-1: foo, 2: foo} } } with mock.patch.object(self.fake_cluster.api_client, 'get_version', return_value=ver_module.Version(version)): with mock.patch.dict(routerlib.ROUTER_FUNC_DICT, foo_func_dict, clear=True): return routerlib.update_lrouter( self.fake_cluster, 'foo_router_id', 'foo_router_name', 'foo_nexthop', routes={'foo_destination': 'foo_address'}) def test_version_dependent_update_lrouter_old_versions(self): self.assertRaises(nsx_exc.InvalidVersion, self._test_version_dependent_update_lrouter, "2.9") self.assertRaises(nsx_exc.InvalidVersion, self._test_version_dependent_update_lrouter, "3.0") self.assertRaises(nsx_exc.InvalidVersion, self._test_version_dependent_update_lrouter, "3.1") def test_version_dependent_update_lrouter_new_versions(self): self.assertEqual("3.2", self._test_version_dependent_update_lrouter("3.2")) self.assertEqual("4.0", self._test_version_dependent_update_lrouter("4.0")) self.assertEqual("4.1", self._test_version_dependent_update_lrouter("4.1")) def test_update_lrouter_no_nexthop(self): neutron_id = uuidutils.generate_uuid() lrouter = routerlib.create_lrouter(self.fake_cluster, neutron_id, 'pippo', 'fake-lrouter', '10.0.0.1') lrouter = routerlib.update_lrouter(self.fake_cluster, lrouter['uuid'], 'new_name', None) res_lrouter = routerlib.get_lrouter(self.fake_cluster, lrouter['uuid']) self._verify_lrouter(res_lrouter, lrouter['uuid'], 'new_name', '10.0.0.1', 'pippo', expected_neutron_id=neutron_id) def test_update_lrouter(self): neutron_id = uuidutils.generate_uuid() lrouter = 
routerlib.create_lrouter(self.fake_cluster, neutron_id, 'pippo', 'fake-lrouter', '10.0.0.1') lrouter = routerlib.update_lrouter(self.fake_cluster, lrouter['uuid'], 'new_name', '192.168.0.1') res_lrouter = routerlib.get_lrouter(self.fake_cluster, lrouter['uuid']) self._verify_lrouter(res_lrouter, lrouter['uuid'], 'new_name', '192.168.0.1', 'pippo', expected_neutron_id=neutron_id) def test_update_nonexistent_lrouter_raises(self): self.assertRaises(exceptions.NotFound, routerlib.update_lrouter, self.fake_cluster, 'whatever', 'foo', '9.9.9.9') def test_delete_lrouter(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') routerlib.delete_lrouter(self.fake_cluster, lrouter['uuid']) self.assertRaises(exceptions.NotFound, routerlib.get_lrouter, self.fake_cluster, lrouter['uuid']) def test_query_lrouter_ports(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') router_port_uuids = [routerlib.create_router_lport( self.fake_cluster, lrouter['uuid'], 'pippo', 'qp_id_%s' % k, 'port-%s' % k, True, ['192.168.0.%s' % k], '00:11:22:33:44:55')['uuid'] for k in range(3)] ports = routerlib.query_lrouter_lports( self.fake_cluster, lrouter['uuid']) self.assertEqual(len(ports), 3) for res_port in ports: self.assertIn(res_port['uuid'], router_port_uuids) def test_query_lrouter_lports_nonexistent_lrouter_raises(self): self.assertRaises( exceptions.NotFound, routerlib.create_router_lport, self.fake_cluster, 'booo', 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') def test_create_and_get_lrouter_port(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') routerlib.create_router_lport( self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') ports = routerlib.query_lrouter_lports( 
self.fake_cluster, lrouter['uuid']) self.assertEqual(len(ports), 1) res_port = ports[0] port_tags = self._build_tag_dict(res_port['tags']) self.assertEqual(['192.168.0.1'], res_port['ip_addresses']) self.assertIn('os_tid', port_tags) self.assertIn('q_port_id', port_tags) self.assertEqual('pippo', port_tags['os_tid']) self.assertEqual('neutron_port_id', port_tags['q_port_id']) def test_create_lrouter_port_nonexistent_router_raises(self): self.assertRaises( exceptions.NotFound, routerlib.create_router_lport, self.fake_cluster, 'booo', 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') def test_update_lrouter_port(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') lrouter_port = routerlib.create_router_lport( self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') routerlib.update_router_lport( self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'], 'pippo', 'another_port_id', 'name', False, ['192.168.0.1', '10.10.10.254']) ports = routerlib.query_lrouter_lports( self.fake_cluster, lrouter['uuid']) self.assertEqual(len(ports), 1) res_port = ports[0] port_tags = self._build_tag_dict(res_port['tags']) self.assertEqual(['192.168.0.1', '10.10.10.254'], res_port['ip_addresses']) self.assertEqual('False', res_port['admin_status_enabled']) self.assertIn('os_tid', port_tags) self.assertIn('q_port_id', port_tags) self.assertEqual('pippo', port_tags['os_tid']) self.assertEqual('another_port_id', port_tags['q_port_id']) def test_update_lrouter_port_nonexistent_router_raises(self): self.assertRaises( exceptions.NotFound, routerlib.update_router_lport, self.fake_cluster, 'boo-router', 'boo-port', 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1']) def test_update_lrouter_port_nonexistent_port_raises(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', 
'10.0.0.1') self.assertRaises( exceptions.NotFound, routerlib.update_router_lport, self.fake_cluster, lrouter['uuid'], 'boo-port', 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1']) def test_delete_lrouter_port(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') lrouter_port = routerlib.create_router_lport( self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [], '00:11:22:33:44:55') ports = routerlib.query_lrouter_lports( self.fake_cluster, lrouter['uuid']) self.assertEqual(len(ports), 1) routerlib.delete_router_lport(self.fake_cluster, lrouter['uuid'], lrouter_port['uuid']) ports = routerlib.query_lrouter_lports( self.fake_cluster, lrouter['uuid']) self.assertFalse(len(ports)) def test_delete_lrouter_port_nonexistent_router_raises(self): self.assertRaises(exceptions.NotFound, routerlib.delete_router_lport, self.fake_cluster, 'xyz', 'abc') def test_delete_lrouter_port_nonexistent_port_raises(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') self.assertRaises(exceptions.NotFound, routerlib.delete_router_lport, self.fake_cluster, lrouter['uuid'], 'abc') def test_delete_peer_lrouter_port(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') lrouter_port = routerlib.create_router_lport( self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [], '00:11:22:33:44:55') def fakegetport(*args, **kwargs): return {'_relations': {'LogicalPortAttachment': {'peer_port_uuid': lrouter_port['uuid']}}} # mock get_port with mock.patch.object(switchlib, 'get_port', new=fakegetport): routerlib.delete_peer_router_lport(self.fake_cluster, lrouter_port['uuid'], 'whatwever', 'whatever') def test_update_lrouter_port_ips_add_only(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') 
lrouter_port = routerlib.create_router_lport( self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') routerlib.update_lrouter_port_ips( self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'], ['10.10.10.254'], []) ports = routerlib.query_lrouter_lports( self.fake_cluster, lrouter['uuid']) self.assertEqual(len(ports), 1) res_port = ports[0] self.assertEqual(sorted(['10.10.10.254', '192.168.0.1']), sorted(res_port['ip_addresses'])) def test_update_lrouter_port_ips_remove_only(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') lrouter_port = routerlib.create_router_lport( self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1', '10.10.10.254'], '00:11:22:33:44:55') routerlib.update_lrouter_port_ips( self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'], [], ['10.10.10.254']) ports = routerlib.query_lrouter_lports( self.fake_cluster, lrouter['uuid']) self.assertEqual(len(ports), 1) res_port = ports[0] self.assertEqual(['192.168.0.1'], res_port['ip_addresses']) def test_update_lrouter_port_ips_add_and_remove(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') lrouter_port = routerlib.create_router_lport( self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') routerlib.update_lrouter_port_ips( self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'], ['10.10.10.254'], ['192.168.0.1']) ports = routerlib.query_lrouter_lports( self.fake_cluster, lrouter['uuid']) self.assertEqual(len(ports), 1) res_port = ports[0] self.assertEqual(['10.10.10.254'], res_port['ip_addresses']) def test_update_lrouter_port_ips_nonexistent_router_raises(self): self.assertRaises( nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips, self.fake_cluster, 'boo-router', 'boo-port', [], 
[]) def test_update_lrouter_port_ips_nsx_exception_raises(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') lrouter_port = routerlib.create_router_lport( self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') def raise_nsx_exc(*args, **kwargs): raise api_exc.NsxApiException() with mock.patch.object(nsxlib, 'do_request', new=raise_nsx_exc): self.assertRaises( nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips, self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'], [], []) def test_plug_lrouter_port_patch_attachment(self): tenant_id = 'pippo' transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] lswitch = switchlib.create_lswitch(self.fake_cluster, _uuid(), tenant_id, 'fake-switch', transport_zones_config) lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'], tenant_id, 'xyz', 'name', 'device_id', True) lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), tenant_id, 'fake-lrouter', '10.0.0.1') lrouter_port = routerlib.create_router_lport( self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66') result = routerlib.plug_router_port_attachment( self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'], lport['uuid'], 'PatchAttachment') self.assertEqual(lport['uuid'], result['LogicalPortAttachment']['peer_port_uuid']) def test_plug_lrouter_port_l3_gw_attachment(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') lrouter_port = routerlib.create_router_lport( self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66') result = routerlib.plug_router_port_attachment( self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'], 'gw_att', 'L3GatewayAttachment') self.assertEqual( 
'gw_att', result['LogicalPortAttachment']['l3_gateway_service_uuid']) def test_plug_lrouter_port_l3_gw_attachment_with_vlan(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') lrouter_port = routerlib.create_router_lport( self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') result = routerlib.plug_router_port_attachment( self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'], 'gw_att', 'L3GatewayAttachment', 123) self.assertEqual( 'gw_att', result['LogicalPortAttachment']['l3_gateway_service_uuid']) self.assertEqual( '123', result['LogicalPortAttachment']['vlan_id']) def test_plug_lrouter_port_invalid_attachment_type_raises(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') lrouter_port = routerlib.create_router_lport( self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') self.assertRaises(nsx_exc.InvalidAttachmentType, routerlib.plug_router_port_attachment, self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'], 'gw_att', 'BadType') def _test_create_router_snat_rule(self, version): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') with mock.patch.object(self.fake_cluster.api_client, 'get_version', new=lambda: ver_module.Version(version)): routerlib.create_lrouter_snat_rule( self.fake_cluster, lrouter['uuid'], '10.0.0.2', '10.0.0.2', order=200, match_criteria={'source_ip_addresses': '192.168.0.24'}) rules = routerlib.query_nat_rules( self.fake_cluster, lrouter['uuid']) self.assertEqual(len(rules), 1) def test_create_router_snat_rule_v3(self): self._test_create_router_snat_rule('3.0') def test_create_router_snat_rule_v2(self): self._test_create_router_snat_rule('2.0') def _test_create_router_dnat_rule(self, version, 
dest_port=None): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') with mock.patch.object(self.fake_cluster.api_client, 'get_version', return_value=ver_module.Version(version)): routerlib.create_lrouter_dnat_rule( self.fake_cluster, lrouter['uuid'], '192.168.0.2', order=200, dest_port=dest_port, match_criteria={'destination_ip_addresses': '10.0.0.3'}) rules = routerlib.query_nat_rules( self.fake_cluster, lrouter['uuid']) self.assertEqual(len(rules), 1) def test_create_router_dnat_rule_v3(self): self._test_create_router_dnat_rule('3.0') def test_create_router_dnat_rule_v2(self): self._test_create_router_dnat_rule('2.0') def test_create_router_dnat_rule_v2_with_destination_port(self): self._test_create_router_dnat_rule('2.0', 8080) def test_create_router_dnat_rule_v3_with_destination_port(self): self._test_create_router_dnat_rule('3.0', 8080) def test_create_router_snat_rule_invalid_match_keys_raises(self): # In this case the version does not make a difference lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') with mock.patch.object(self.fake_cluster.api_client, 'get_version', new=lambda: '2.0'): self.assertRaises(AttributeError, routerlib.create_lrouter_snat_rule, self.fake_cluster, lrouter['uuid'], '10.0.0.2', '10.0.0.2', order=200, match_criteria={'foo': 'bar'}) def _test_create_router_nosnat_rule(self, version, expected=1): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') with mock.patch.object(self.fake_cluster.api_client, 'get_version', new=lambda: ver_module.Version(version)): routerlib.create_lrouter_nosnat_rule( self.fake_cluster, lrouter['uuid'], order=100, match_criteria={'destination_ip_addresses': '192.168.0.0/24'}) rules = routerlib.query_nat_rules( self.fake_cluster, lrouter['uuid']) # NoSNAT rules do not exist in V2 self.assertEqual(len(rules), 
expected) def test_create_router_nosnat_rule_v2(self): self._test_create_router_nosnat_rule('2.0', expected=0) def test_create_router_nosnat_rule_v3(self): self._test_create_router_nosnat_rule('3.0') def _prepare_nat_rules_for_delete_tests(self): lrouter = routerlib.create_lrouter(self.fake_cluster, uuidutils.generate_uuid(), 'pippo', 'fake-lrouter', '10.0.0.1') # v2 or v3 makes no difference for this test with mock.patch.object(self.fake_cluster.api_client, 'get_version', new=lambda: ver_module.Version('2.0')): routerlib.create_lrouter_snat_rule( self.fake_cluster, lrouter['uuid'], '10.0.0.2', '10.0.0.2', order=220, match_criteria={'source_ip_addresses': '192.168.0.0/24'}) routerlib.create_lrouter_snat_rule( self.fake_cluster, lrouter['uuid'], '10.0.0.3', '10.0.0.3', order=200, match_criteria={'source_ip_addresses': '192.168.0.2/32'}) routerlib.create_lrouter_dnat_rule( self.fake_cluster, lrouter['uuid'], '192.168.0.2', order=200, match_criteria={'destination_ip_addresses': '10.0.0.3'}) return lrouter def test_delete_router_nat_rules_by_match_on_destination_ip(self): lrouter = self._prepare_nat_rules_for_delete_tests() rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) self.assertEqual(len(rules), 3) routerlib.delete_nat_rules_by_match( self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 1, 1, destination_ip_addresses='10.0.0.3') rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) self.assertEqual(len(rules), 2) def test_delete_router_nat_rules_by_match_on_source_ip(self): lrouter = self._prepare_nat_rules_for_delete_tests() rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) self.assertEqual(len(rules), 3) routerlib.delete_nat_rules_by_match( self.fake_cluster, lrouter['uuid'], 'SourceNatRule', 1, 1, source_ip_addresses='192.168.0.2/32') rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) self.assertEqual(len(rules), 2) def test_delete_router_nat_rules_by_match_no_match_expected(self): 
lrouter = self._prepare_nat_rules_for_delete_tests() rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) self.assertEqual(len(rules), 3) routerlib.delete_nat_rules_by_match( self.fake_cluster, lrouter['uuid'], 'SomeWeirdType', 0) rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) self.assertEqual(len(rules), 3) routerlib.delete_nat_rules_by_match( self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 0, destination_ip_addresses='99.99.99.99') rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) self.assertEqual(len(rules), 3) def test_delete_router_nat_rules_by_match_no_match_raises(self): lrouter = self._prepare_nat_rules_for_delete_tests() rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) self.assertEqual(len(rules), 3) self.assertRaises( nsx_exc.NatRuleMismatch, routerlib.delete_nat_rules_by_match, self.fake_cluster, lrouter['uuid'], 'SomeWeirdType', 1, 1) def test_delete_nat_rules_by_match_len_mismatch_does_not_raise(self): lrouter = self._prepare_nat_rules_for_delete_tests() rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) self.assertEqual(len(rules), 3) deleted_rules = routerlib.delete_nat_rules_by_match( self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', max_num_expected=1, min_num_expected=1, raise_on_len_mismatch=False, destination_ip_addresses='99.99.99.99') self.assertEqual(0, deleted_rules) # add an extra rule to emulate a duplicate one with mock.patch.object(self.fake_cluster.api_client, 'get_version', new=lambda: ver_module.Version('2.0')): routerlib.create_lrouter_snat_rule( self.fake_cluster, lrouter['uuid'], '10.0.0.2', '10.0.0.2', order=220, match_criteria={'source_ip_addresses': '192.168.0.0/24'}) deleted_rules_2 = routerlib.delete_nat_rules_by_match( self.fake_cluster, lrouter['uuid'], 'SourceNatRule', min_num_expected=1, max_num_expected=1, raise_on_len_mismatch=False, source_ip_addresses='192.168.0.0/24') self.assertEqual(2, 
deleted_rules_2) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsxlib/mh/__init__.py0000666000175100017510000000000013244523345024251 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsxlib/mh/test_queue.py0000666000175100017510000000502413244523345024710 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import mock from neutron_lib import exceptions from vmware_nsx.api_client import exception as api_exc from vmware_nsx.nsxlib import mh as nsxlib from vmware_nsx.nsxlib.mh import queue as queuelib from vmware_nsx.tests.unit.nsxlib.mh import base class TestLogicalQueueLib(base.NsxlibTestCase): def setUp(self): super(TestLogicalQueueLib, self).setUp() self.fake_queue = { 'name': 'fake_queue', 'min': 0, 'max': 256, 'dscp': 0, 'qos_marking': False } def test_create_and_get_lqueue(self): queue_id = queuelib.create_lqueue( self.fake_cluster, self.fake_queue) queue_res = nsxlib.do_request( 'GET', nsxlib._build_uri_path('lqueue', resource_id=queue_id), cluster=self.fake_cluster) self.assertEqual(queue_id, queue_res['uuid']) self.assertEqual('fake_queue', queue_res['display_name']) def test_create_lqueue_nsx_error_raises(self): def raise_nsx_exc(*args, **kwargs): raise api_exc.NsxApiException() with mock.patch.object(nsxlib, 'do_request', new=raise_nsx_exc): self.assertRaises( exceptions.NeutronException, queuelib.create_lqueue, self.fake_cluster, self.fake_queue) def test_delete_lqueue(self): queue_id = 
queuelib.create_lqueue( self.fake_cluster, self.fake_queue) queuelib.delete_lqueue(self.fake_cluster, queue_id) self.assertRaises(exceptions.NotFound, nsxlib.do_request, 'GET', nsxlib._build_uri_path( 'lqueue', resource_id=queue_id), cluster=self.fake_cluster) def test_delete_non_existing_lqueue_raises(self): self.assertRaises(exceptions.NeutronException, queuelib.delete_lqueue, self.fake_cluster, 'whatever') vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsxlib/mh/test_lsn.py0000666000175100017510000003453213244523345024366 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from neutron.tests import base from neutron_lib import exceptions from oslo_serialization import jsonutils import six from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils from vmware_nsx.nsxlib.mh import lsn as lsnlib class LSNTestCase(base.BaseTestCase): def setUp(self): super(LSNTestCase, self).setUp() self.mock_request_p = mock.patch( 'vmware_nsx.nsxlib.mh.do_request') self.mock_request = self.mock_request_p.start() self.cluster = mock.Mock() self.cluster.default_service_cluster_uuid = 'foo' def test_service_cluster_None(self): self.mock_request.return_value = None expected = lsnlib.service_cluster_exists(None, None) self.assertFalse(expected) def test_service_cluster_found(self): self.mock_request.return_value = { "results": [ { "_href": "/ws.v1/service-cluster/foo_uuid", "display_name": "foo_name", "uuid": "foo_uuid", "tags": [], "_schema": "/ws.v1/schema/ServiceClusterConfig", "gateways": [] } ], "result_count": 1 } expected = lsnlib.service_cluster_exists(None, 'foo_uuid') self.assertTrue(expected) def test_service_cluster_not_found(self): self.mock_request.side_effect = exceptions.NotFound() expected = lsnlib.service_cluster_exists(None, 'foo_uuid') self.assertFalse(expected) def test_lsn_for_network_create(self): net_id = "foo_network_id" tags = utils.get_tags(n_network_id=net_id) obj = {"edge_cluster_uuid": "foo", "tags": tags} lsnlib.lsn_for_network_create(self.cluster, net_id) self.mock_request.assert_called_once_with( "POST", "/ws.v1/lservices-node", jsonutils.dumps(obj, sort_keys=True), cluster=self.cluster) def test_lsn_for_network_get(self): net_id = "foo_network_id" lsn_id = "foo_lsn_id" self.mock_request.return_value = { "results": [{"uuid": "foo_lsn_id"}], "result_count": 1 } result = lsnlib.lsn_for_network_get(self.cluster, net_id) self.assertEqual(lsn_id, result) self.mock_request.assert_called_once_with( "GET", 
("/ws.v1/lservices-node?fields=uuid&tag=%s&" "tag_scope=n_network_id" % net_id), cluster=self.cluster) def test_lsn_for_network_get_none(self): net_id = "foo_network_id" self.mock_request.return_value = { "results": [{"uuid": "foo_lsn_id1"}, {"uuid": "foo_lsn_id2"}], "result_count": 2 } result = lsnlib.lsn_for_network_get(self.cluster, net_id) self.assertIsNone(result) def test_lsn_for_network_get_raise_not_found(self): net_id = "foo_network_id" self.mock_request.return_value = { "results": [], "result_count": 0 } self.assertRaises(exceptions.NotFound, lsnlib.lsn_for_network_get, self.cluster, net_id) def test_lsn_delete(self): lsn_id = "foo_id" lsnlib.lsn_delete(self.cluster, lsn_id) self.mock_request.assert_called_once_with( "DELETE", "/ws.v1/lservices-node/%s" % lsn_id, cluster=self.cluster) def _test_lsn_port_host_entries_update(self, lsn_type, hosts_data): lsn_id = 'foo_lsn_id' lsn_port_id = 'foo_lsn_port_id' lsnlib.lsn_port_host_entries_update( self.cluster, lsn_id, lsn_port_id, lsn_type, hosts_data) self.mock_request.assert_called_once_with( 'PUT', '/ws.v1/lservices-node/%s/lport/%s/%s' % (lsn_id, lsn_port_id, lsn_type), jsonutils.dumps({'hosts': hosts_data}, sort_keys=True), cluster=self.cluster) def test_lsn_port_dhcp_entries_update(self): hosts_data = [{"ip_address": "11.22.33.44", "mac_address": "aa:bb:cc:dd:ee:ff"}, {"ip_address": "44.33.22.11", "mac_address": "ff:ee:dd:cc:bb:aa"}] self._test_lsn_port_host_entries_update("dhcp", hosts_data) def test_lsn_port_metadata_entries_update(self): hosts_data = [{"ip_address": "11.22.33.44", "device_id": "foo_vm_uuid"}] self._test_lsn_port_host_entries_update("metadata-proxy", hosts_data) def test_lsn_port_create(self): port_data = { "ip_address": "1.2.3.0/24", "mac_address": "aa:bb:cc:dd:ee:ff", "subnet_id": "foo_subnet_id" } port_id = "foo_port_id" self.mock_request.return_value = {"uuid": port_id} lsn_id = "foo_lsn_id" result = lsnlib.lsn_port_create(self.cluster, lsn_id, port_data) self.assertEqual(result, 
port_id) tags = utils.get_tags(n_subnet_id=port_data["subnet_id"], n_mac_address=port_data["mac_address"]) port_obj = { "ip_address": port_data["ip_address"], "mac_address": port_data["mac_address"], "type": "LogicalServicesNodePortConfig", "tags": tags } self.mock_request.assert_called_once_with( "POST", "/ws.v1/lservices-node/%s/lport" % lsn_id, jsonutils.dumps(port_obj, sort_keys=True), cluster=self.cluster) def test_lsn_port_delete(self): lsn_id = "foo_lsn_id" lsn_port_id = "foo_port_id" lsnlib.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) self.mock_request.assert_called_once_with( "DELETE", "/ws.v1/lservices-node/%s/lport/%s" % (lsn_id, lsn_port_id), cluster=self.cluster) def test_lsn_port_get_with_filters(self): lsn_id = "foo_lsn_id" port_id = "foo_port_id" filters = {"tag": "foo_tag", "tag_scope": "foo_scope"} self.mock_request.return_value = { "results": [{"uuid": port_id}], "result_count": 1 } result = lsnlib._lsn_port_get(self.cluster, lsn_id, filters) self.assertEqual(result, port_id) self.mock_request.assert_called_once_with( "GET", ("/ws.v1/lservices-node/%s/lport?fields=uuid&tag=%s&" "tag_scope=%s" % (lsn_id, filters["tag"], filters["tag_scope"])), cluster=self.cluster) def test_lsn_port_get_with_filters_return_none(self): self.mock_request.return_value = { "results": [{"uuid": "foo1"}, {"uuid": "foo2"}], "result_count": 2 } result = lsnlib._lsn_port_get(self.cluster, "lsn_id", None) self.assertIsNone(result) def test_lsn_port_get_with_filters_raises_not_found(self): self.mock_request.return_value = {"results": [], "result_count": 0} self.assertRaises(exceptions.NotFound, lsnlib._lsn_port_get, self.cluster, "lsn_id", None) def test_lsn_port_info_get(self): self.mock_request.return_value = { "tags": [ {"scope": "n_mac_address", "tag": "fa:16:3e:27:fd:a0"}, {"scope": "n_subnet_id", "tag": "foo_subnet_id"}, ], "mac_address": "aa:bb:cc:dd:ee:ff", "ip_address": "0.0.0.0/0", "uuid": "foo_lsn_port_id" } result = lsnlib.lsn_port_info_get( self.cluster, 
'foo_lsn_id', 'foo_lsn_port_id') self.mock_request.assert_called_once_with( 'GET', '/ws.v1/lservices-node/foo_lsn_id/lport/foo_lsn_port_id', cluster=self.cluster) self.assertIn('subnet_id', result) self.assertIn('mac_address', result) def test_lsn_port_info_get_raise_not_found(self): self.mock_request.side_effect = exceptions.NotFound self.assertRaises(exceptions.NotFound, lsnlib.lsn_port_info_get, self.cluster, mock.ANY, mock.ANY) def test_lsn_port_plug_network(self): lsn_id = "foo_lsn_id" lsn_port_id = "foo_lsn_port_id" lswitch_port_id = "foo_lswitch_port_id" lsnlib.lsn_port_plug_network( self.cluster, lsn_id, lsn_port_id, lswitch_port_id) self.mock_request.assert_called_once_with( "PUT", ("/ws.v1/lservices-node/%s/lport/%s/" "attachment") % (lsn_id, lsn_port_id), jsonutils.dumps({"peer_port_uuid": lswitch_port_id, "type": "PatchAttachment"}, sort_keys=True), cluster=self.cluster) def test_lsn_port_plug_network_raise_conflict(self): lsn_id = "foo_lsn_id" lsn_port_id = "foo_lsn_port_id" lswitch_port_id = "foo_lswitch_port_id" self.mock_request.side_effect = api_exc.Conflict self.assertRaises( nsx_exc.LsnConfigurationConflict, lsnlib.lsn_port_plug_network, self.cluster, lsn_id, lsn_port_id, lswitch_port_id) def _test_lsn_port_dhcp_configure( self, lsn_id, lsn_port_id, is_enabled, opts): lsnlib.lsn_port_dhcp_configure( self.cluster, lsn_id, lsn_port_id, is_enabled, opts) opt_array = [ {"name": key, "value": val} for key, val in six.iteritems(opts) ] self.mock_request.assert_has_calls([ mock.call("PUT", "/ws.v1/lservices-node/%s/dhcp" % lsn_id, jsonutils.dumps({"enabled": is_enabled}, sort_keys=True), cluster=self.cluster), mock.call("PUT", ("/ws.v1/lservices-node/%s/" "lport/%s/dhcp") % (lsn_id, lsn_port_id), jsonutils.dumps({"options": opt_array}, sort_keys=True), cluster=self.cluster) ]) def test_lsn_port_dhcp_configure_empty_opts(self): lsn_id = "foo_lsn_id" lsn_port_id = "foo_lsn_port_id" is_enabled = False opts = {} self._test_lsn_port_dhcp_configure( lsn_id, 
lsn_port_id, is_enabled, opts) def test_lsn_port_dhcp_configure_with_opts(self): lsn_id = "foo_lsn_id" lsn_port_id = "foo_lsn_port_id" is_enabled = True opts = {"opt1": "val1", "opt2": "val2"} self._test_lsn_port_dhcp_configure( lsn_id, lsn_port_id, is_enabled, opts) def _test_lsn_metadata_configure( self, lsn_id, is_enabled, opts, expected_opts): lsnlib.lsn_metadata_configure( self.cluster, lsn_id, is_enabled, opts) lsn_obj = {"enabled": is_enabled} lsn_obj.update(expected_opts) self.mock_request.assert_has_calls([ mock.call("PUT", "/ws.v1/lservices-node/%s/metadata-proxy" % lsn_id, jsonutils.dumps(lsn_obj, sort_keys=True), cluster=self.cluster), ]) def test_lsn_port_metadata_configure_empty_secret(self): lsn_id = "foo_lsn_id" is_enabled = True opts = { "metadata_server_ip": "1.2.3.4", "metadata_server_port": "8775" } expected_opts = { "metadata_server_ip": "1.2.3.4", "metadata_server_port": "8775", } self._test_lsn_metadata_configure( lsn_id, is_enabled, opts, expected_opts) def test_lsn_metadata_configure_with_secret(self): lsn_id = "foo_lsn_id" is_enabled = True opts = { "metadata_server_ip": "1.2.3.4", "metadata_server_port": "8775", "metadata_proxy_shared_secret": "foo_secret" } expected_opts = { "metadata_server_ip": "1.2.3.4", "metadata_server_port": "8775", "options": [{ "name": "metadata_proxy_shared_secret", "value": "foo_secret" }] } self._test_lsn_metadata_configure( lsn_id, is_enabled, opts, expected_opts) def _test_lsn_port_host_action( self, lsn_port_action_func, extra_action, action, host): lsn_id = "foo_lsn_id" lsn_port_id = "foo_lsn_port_id" lsn_port_action_func(self.cluster, lsn_id, lsn_port_id, host) self.mock_request.assert_called_once_with( "POST", ("/ws.v1/lservices-node/%s/lport/" "%s/%s?action=%s") % (lsn_id, lsn_port_id, extra_action, action), jsonutils.dumps(host, sort_keys=True), cluster=self.cluster) def test_lsn_port_dhcp_host_add(self): host = { "ip_address": "1.2.3.4", "mac_address": "aa:bb:cc:dd:ee:ff" } 
self._test_lsn_port_host_action( lsnlib.lsn_port_dhcp_host_add, "dhcp", "add_host", host) def test_lsn_port_dhcp_host_remove(self): host = { "ip_address": "1.2.3.4", "mac_address": "aa:bb:cc:dd:ee:ff" } self._test_lsn_port_host_action( lsnlib.lsn_port_dhcp_host_remove, "dhcp", "remove_host", host) def test_lsn_port_metadata_host_add(self): host = { "ip_address": "1.2.3.4", "instance_id": "foo_instance_id" } self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_add, "metadata-proxy", "add_host", host) def test_lsn_port_metadata_host_remove(self): host = { "ip_address": "1.2.3.4", "instance_id": "foo_instance_id" } self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_remove, "metadata-proxy", "remove_host", host) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsxlib/mh/test_secgroup.py0000666000175100017510000002605613244523345025423 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# from neutron.tests.unit.api.v2 import test_base from neutron_lib import constants from neutron_lib import exceptions from vmware_nsx.nsxlib import mh as nsxlib from vmware_nsx.nsxlib.mh import secgroup as secgrouplib from vmware_nsx.tests.unit.nsxlib.mh import base _uuid = test_base._uuid class SecurityProfileTestCase(base.NsxlibTestCase): def test_create_and_get_security_profile(self): sec_prof = secgrouplib.create_security_profile( self.fake_cluster, _uuid(), 'pippo', {'name': 'test'}) sec_prof_res = nsxlib.do_request( secgrouplib.HTTP_GET, nsxlib._build_uri_path('security-profile', resource_id=sec_prof['uuid']), cluster=self.fake_cluster) self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid']) # Check for builtin rules self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 1) self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 2) def test_create_and_get_default_security_profile(self): sec_prof = secgrouplib.create_security_profile( self.fake_cluster, _uuid(), 'pippo', {'name': 'default'}) sec_prof_res = nsxlib.do_request( secgrouplib.HTTP_GET, nsxlib._build_uri_path('security-profile', resource_id=sec_prof['uuid']), cluster=self.fake_cluster) self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid']) # Check for builtin rules self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 3) self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 2) def test_update_security_profile_raise_not_found(self): self.assertRaises(exceptions.NotFound, secgrouplib.update_security_profile, self.fake_cluster, _uuid(), 'tatore_magno(the great)') def test_update_security_profile(self): tenant_id = 'foo_tenant_uuid' secgroup_id = 'foo_secgroup_uuid' old_sec_prof = secgrouplib.create_security_profile( self.fake_cluster, tenant_id, secgroup_id, {'name': 'tatore_magno'}) new_sec_prof = secgrouplib.update_security_profile( self.fake_cluster, old_sec_prof['uuid'], 'aaron_magno') self.assertEqual('aaron_magno', new_sec_prof['display_name']) def 
test_update_security_profile_rules(self): sec_prof = secgrouplib.create_security_profile( self.fake_cluster, _uuid(), 'pippo', {'name': 'test'}) ingress_rule = {'ethertype': 'IPv4'} egress_rule = {'ethertype': 'IPv4', 'profile_uuid': 'xyz'} new_rules = {'logical_port_egress_rules': [egress_rule], 'logical_port_ingress_rules': [ingress_rule]} secgrouplib.update_security_group_rules( self.fake_cluster, sec_prof['uuid'], new_rules) sec_prof_res = nsxlib.do_request( nsxlib.HTTP_GET, nsxlib._build_uri_path('security-profile', resource_id=sec_prof['uuid']), cluster=self.fake_cluster) self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid']) # Check for builtin rules self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 2) self.assertIn(egress_rule, sec_prof_res['logical_port_egress_rules']) self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1) self.assertIn(ingress_rule, sec_prof_res['logical_port_ingress_rules']) def test_update_security_profile_rules_noingress(self): sec_prof = secgrouplib.create_security_profile( self.fake_cluster, _uuid(), 'pippo', {'name': 'test'}) hidden_ingress_rule = {'ethertype': 'IPv4', 'ip_prefix': '127.0.0.1/32'} egress_rule = {'ethertype': 'IPv4', 'profile_uuid': 'xyz'} new_rules = {'logical_port_egress_rules': [egress_rule], 'logical_port_ingress_rules': []} secgrouplib.update_security_group_rules( self.fake_cluster, sec_prof['uuid'], new_rules) sec_prof_res = nsxlib.do_request( nsxlib.HTTP_GET, nsxlib._build_uri_path('security-profile', resource_id=sec_prof['uuid']), cluster=self.fake_cluster) self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid']) # Check for builtin rules self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 2) self.assertIn(egress_rule, sec_prof_res['logical_port_egress_rules']) self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1) self.assertIn(hidden_ingress_rule, sec_prof_res['logical_port_ingress_rules']) def 
test_update_security_profile_rules_summarize_port_range(self): sec_prof = secgrouplib.create_security_profile( self.fake_cluster, _uuid(), 'pippo', {'name': 'test'}) ingress_rule = [{'ethertype': 'IPv4'}] egress_rules = [ {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP, 'port_range_min': 1, 'port_range_max': 65535}] new_rules = {'logical_port_egress_rules': egress_rules, 'logical_port_ingress_rules': [ingress_rule]} egress_rules_summarized = [{'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP}] secgrouplib.update_security_group_rules( self.fake_cluster, sec_prof['uuid'], new_rules) sec_prof_res = nsxlib.do_request( nsxlib.HTTP_GET, nsxlib._build_uri_path('security-profile', resource_id=sec_prof['uuid']), cluster=self.fake_cluster) self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid']) # Check for builtin rules self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1) self.assertEqual(sec_prof_res['logical_port_egress_rules'], egress_rules_summarized) self.assertIn(ingress_rule, sec_prof_res['logical_port_ingress_rules']) def test_update_security_profile_rules_summarize_ip_prefix(self): sec_prof = secgrouplib.create_security_profile( self.fake_cluster, _uuid(), 'pippo', {'name': 'test'}) ingress_rule = [{'ethertype': 'IPv4'}] egress_rules = [ {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP, 'ip_prefix': '0.0.0.0/0'}, {'ethertype': 'IPv6', 'protocol': constants.PROTO_NUM_UDP, 'ip_prefix': '::/0'}] new_rules = {'logical_port_egress_rules': egress_rules, 'logical_port_ingress_rules': [ingress_rule]} egress_rules_summarized = [ {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP}, {'ethertype': 'IPv6', 'protocol': constants.PROTO_NUM_UDP}] secgrouplib.update_security_group_rules( self.fake_cluster, sec_prof['uuid'], new_rules) sec_prof_res = nsxlib.do_request( nsxlib.HTTP_GET, nsxlib._build_uri_path('security-profile', resource_id=sec_prof['uuid']), cluster=self.fake_cluster) self.assertEqual(sec_prof['uuid'], 
sec_prof_res['uuid']) # Check for builtin rules self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1) self.assertEqual(sec_prof_res['logical_port_egress_rules'], egress_rules_summarized) self.assertIn(ingress_rule, sec_prof_res['logical_port_ingress_rules']) def test_update_security_profile_rules_summarize_subset(self): sec_prof = secgrouplib.create_security_profile( self.fake_cluster, _uuid(), 'pippo', {'name': 'test'}) ingress_rule = [{'ethertype': 'IPv4'}] egress_rules = [ {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP, 'port_range_min': 1, 'port_range_max': 1, 'remote_ip_prefix': '1.1.1.1/20'}, {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP, 'port_range_min': 2, 'port_range_max': 2, 'profile_uuid': 'xyz'}, {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP}] new_rules = {'logical_port_egress_rules': egress_rules, 'logical_port_ingress_rules': [ingress_rule]} egress_rules_summarized = [ {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP}] secgrouplib.update_security_group_rules( self.fake_cluster, sec_prof['uuid'], new_rules) sec_prof_res = nsxlib.do_request( nsxlib.HTTP_GET, nsxlib._build_uri_path('security-profile', resource_id=sec_prof['uuid']), cluster=self.fake_cluster) self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid']) # Check for builtin rules self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1) self.assertEqual(sec_prof_res['logical_port_egress_rules'], egress_rules_summarized) self.assertIn(ingress_rule, sec_prof_res['logical_port_ingress_rules']) def test_update_non_existing_securityprofile_raises(self): self.assertRaises(exceptions.NeutronException, secgrouplib.update_security_group_rules, self.fake_cluster, 'whatever', {'logical_port_egress_rules': [], 'logical_port_ingress_rules': []}) def test_delete_security_profile(self): sec_prof = secgrouplib.create_security_profile( self.fake_cluster, _uuid(), 'pippo', {'name': 'test'}) secgrouplib.delete_security_profile( 
self.fake_cluster, sec_prof['uuid']) self.assertRaises(exceptions.NotFound, nsxlib.do_request, nsxlib.HTTP_GET, nsxlib._build_uri_path( 'security-profile', resource_id=sec_prof['uuid']), cluster=self.fake_cluster) def test_delete_non_existing_securityprofile_raises(self): self.assertRaises(exceptions.NeutronException, secgrouplib.delete_security_profile, self.fake_cluster, 'whatever') vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsxlib/mh/base.py0000666000175100017510000000702013244523345023435 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import mock from neutron.tests import base from neutron.tests.unit.api.v2 import test_base from vmware_nsx.api_client import client from vmware_nsx.api_client import exception from vmware_nsx.api_client import version from vmware_nsx.common import config # noqa from vmware_nsx import nsx_cluster as cluster from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_mh.apiclient import fake _uuid = test_base._uuid class NsxlibTestCase(base.BaseTestCase): def setUp(self): self.fc = fake.FakeClient(vmware.STUBS_PATH) self.mock_nsxapi = mock.patch(vmware.NSXAPI_NAME, autospec=True) instance = self.mock_nsxapi.start() instance.return_value.login.return_value = "the_cookie" fake_version = getattr(self, 'fake_version', "3.0") instance.return_value.get_version.return_value = ( version.Version(fake_version)) instance.return_value.request.side_effect = self.fc.fake_request self.fake_cluster = cluster.NSXCluster( name='fake-cluster', nsx_controllers=['1.1.1.1:999'], default_tz_uuid=_uuid(), nsx_user='foo', nsx_password='bar') self.fake_cluster.api_client = client.NsxApiClient( ('1.1.1.1', '999', True), self.fake_cluster.nsx_user, self.fake_cluster.nsx_password, self.fake_cluster.http_timeout, self.fake_cluster.retries, self.fake_cluster.redirects) super(NsxlibTestCase, self).setUp() self.addCleanup(self.fc.reset_all) def _build_tag_dict(self, tags): # This syntax is needed for python 2.6 compatibility return dict((t['scope'], t['tag']) for t in tags) class NsxlibNegativeBaseTestCase(base.BaseTestCase): def setUp(self): self.fc = fake.FakeClient(vmware.STUBS_PATH) self.mock_nsxapi = mock.patch(vmware.NSXAPI_NAME, autospec=True) instance = self.mock_nsxapi.start() instance.return_value.login.return_value = "the_cookie" # Choose 3.0, but the version is irrelevant for the aim of # these tests as calls are throwing up errors anyway fake_version = getattr(self, 'fake_version', "3.0") instance.return_value.get_version.return_value = ( version.Version(fake_version)) 
def _faulty_request(*args, **kwargs): raise exception.NsxApiException() instance.return_value.request.side_effect = _faulty_request self.fake_cluster = cluster.NSXCluster( name='fake-cluster', nsx_controllers=['1.1.1.1:999'], default_tz_uuid=_uuid(), nsx_user='foo', nsx_password='bar') self.fake_cluster.api_client = client.NsxApiClient( ('1.1.1.1', '999', True), self.fake_cluster.nsx_user, self.fake_cluster.nsx_password, self.fake_cluster.http_timeout, self.fake_cluster.retries, self.fake_cluster.redirects) super(NsxlibNegativeBaseTestCase, self).setUp() self.addCleanup(self.fc.reset_all) vmware-nsx-12.0.1/vmware_nsx/tests/unit/nsxlib/mh/test_versioning.py0000666000175100017510000000464113244523345025753 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# from neutron.tests import base from vmware_nsx.api_client import ( version as version_module) from vmware_nsx.api_client import exception from vmware_nsx.nsxlib.mh import router as routerlib from vmware_nsx.nsxlib.mh import versioning class TestVersioning(base.BaseTestCase): def test_function_handling_missing_minor(self): version = version_module.Version('2.0') function = versioning.get_function_by_version( routerlib.ROUTER_FUNC_DICT, 'create_lrouter', version) self.assertEqual(routerlib.create_implicit_routing_lrouter, function) def test_function_handling_with_both_major_and_minor(self): version = version_module.Version('3.2') function = versioning.get_function_by_version( routerlib.ROUTER_FUNC_DICT, 'create_lrouter', version) self.assertEqual(routerlib.create_explicit_routing_lrouter, function) def test_function_handling_with_newer_major(self): version = version_module.Version('5.2') function = versioning.get_function_by_version( routerlib.ROUTER_FUNC_DICT, 'create_lrouter', version) self.assertEqual(routerlib.create_explicit_routing_lrouter, function) def test_function_handling_with_obsolete_major(self): version = version_module.Version('1.2') self.assertRaises(NotImplementedError, versioning.get_function_by_version, routerlib.ROUTER_FUNC_DICT, 'create_lrouter', version) def test_function_handling_with_unknown_version(self): self.assertRaises(exception.ServiceUnavailable, versioning.get_function_by_version, routerlib.ROUTER_FUNC_DICT, 'create_lrouter', None) vmware-nsx-12.0.1/vmware_nsx/_i18n.py0000666000175100017510000000203413244523345017415 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n DOMAIN = "vmware_nsx" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) vmware-nsx-12.0.1/vmware_nsx/plugins/0000775000175100017510000000000013244524600017600 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/dvs/0000775000175100017510000000000013244524600020374 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/dvs/dhcp.py0000666000175100017510000000543013244523345021675 0ustar zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging from neutron.agent.common import ovs_lib from neutron.agent.linux import dhcp LOG = logging.getLogger(__name__) OPTS = [ cfg.StrOpt('dvs_integration_bridge', default='br-dvs', help=_('Name of Open vSwitch bridge to use for DVS networks')), cfg.StrOpt('dhcp_override_mac', help=_('Override the MAC address of the DHCP interface')), ] cfg.CONF.register_opts(OPTS) class DeviceManager(dhcp.DeviceManager): def plug(self, network, port, interface_name): mac_address = (cfg.CONF.dhcp_override_mac if cfg.CONF.dhcp_override_mac else port.mac_address) self.driver.plug(network.id, port.id, interface_name, mac_address, namespace=network.namespace, mtu=network.get('mtu'), bridge=cfg.CONF.dvs_integration_bridge) vlan_tag = getattr(network, 'provider:segmentation_id', None) # Treat vlans if vlan_tag and vlan_tag != 0: br_dvs = ovs_lib.OVSBridge(self.conf.dvs_integration_bridge) # When ovs_use_veth is set to True, the DEV_NAME_PREFIX # will be changed from 'tap' to 'ns-' in # OVSInterfaceDriver dvs_port_name = interface_name.replace('ns-', 'tap') br_dvs.set_db_attribute("Port", dvs_port_name, "tag", vlan_tag) def unplug(self, device_name, network): self.driver.unplug( device_name, bridge=cfg.CONF.dvs_integration_bridge, namespace=network.namespace) class Dnsmasq(dhcp.Dnsmasq): def __init__(self, conf, network, process_monitor, version=None, plugin=None): super(Dnsmasq, self).__init__(conf, network, process_monitor, version=version, plugin=plugin) # Using the DeviceManager that enables us to directly plug the OVS LOG.debug("Using the DVS DeviceManager") self.device_manager = DeviceManager(conf, plugin) vmware-nsx-12.0.1/vmware_nsx/plugins/dvs/__init__.py0000666000175100017510000000000013244523345022502 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/dvs/plugin.py0000666000175100017510000006122213244523345022256 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import port_security as psec from neutron_lib.exceptions import allowedaddresspairs as addr_exc from neutron_lib.exceptions import port_security as psec_exc from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron.api import extensions as neutron_extensions from neutron.db import _resource_extend as resource_extend from neutron.db import _utils as db_utils from neutron.db import agentschedulers_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import api as db_api from neutron.db import dns_db from neutron.db import external_net_db from neutron.db import l3_db from neutron.db.models import securitygroup as securitygroup_model # noqa from neutron.db import models_v2 from neutron.db import portbindings_db from neutron.db import portsecurity_db from neutron.db import securitygroups_db from neutron.db import vlantransparent_db as vlan_ext_db from neutron.extensions import providernet from neutron.extensions import securitygroup as ext_sg from neutron.plugins.common import utils from neutron.quota import resource_registry from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import portbindings as pbin from neutron_lib.api.definitions import provider_net as pnet from 
neutron_lib.api.definitions import vlantransparent as vlan_apidef from neutron_lib.api import validators from neutron_lib import constants from neutron_lib import exceptions as n_exc import vmware_nsx from vmware_nsx._i18n import _ from vmware_nsx.common import config # noqa from vmware_nsx.common import managers as nsx_managers from vmware_nsx.common import nsx_constants from vmware_nsx.common import utils as c_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsxv_db from vmware_nsx.dhcp_meta import modes as dhcpmeta_modes from vmware_nsx.dvs import dvs from vmware_nsx.dvs import dvs_utils from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.common import plugin as nsx_plugin_common from vmware_nsx.plugins.nsx import utils as tvd_utils LOG = logging.getLogger(__name__) @resource_extend.has_resource_extenders class NsxDvsV2(addr_pair_db.AllowedAddressPairsMixin, agentschedulers_db.DhcpAgentSchedulerDbMixin, nsx_plugin_common.NsxPluginBase, dhcpmeta_modes.DhcpMetadataAccess, external_net_db.External_net_db_mixin, l3_db.L3_NAT_dbonly_mixin, portbindings_db.PortBindingMixin, portsecurity_db.PortSecurityDbMixin, securitygroups_db.SecurityGroupDbMixin, dns_db.DNSDbMixin, vlan_ext_db.Vlantransparent_db_mixin): supported_extension_aliases = ["allowed-address-pairs", "binding", "external-net", "multi-provider", "port-security", "provider", "quotas", "router", "security-group", "vlan-transparent"] __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True @resource_registry.tracked_resources( network=models_v2.Network, port=models_v2.Port, subnet=models_v2.Subnet, subnetpool=models_v2.SubnetPool, security_group=securitygroup_model.SecurityGroup, security_group_rule=securitygroup_model.SecurityGroupRule) def __init__(self): self._is_sub_plugin = tvd_utils.is_tvd_core_plugin() dvs_utils.dvs_register_exceptions() super(NsxDvsV2, self).__init__() if self._is_sub_plugin: extension_drivers = 
cfg.CONF.nsx_tvd.dvs_extension_drivers else: extension_drivers = cfg.CONF.nsx_extension_drivers self._extension_manager = nsx_managers.ExtensionManager( extension_drivers=extension_drivers) LOG.debug('Driver support: DVS: %s' % dvs_utils.dvs_is_enabled()) self._extension_manager.initialize() self.supported_extension_aliases.extend( self._extension_manager.extension_aliases()) neutron_extensions.append_api_extensions_path( [vmware_nsx.NSX_EXT_PATH]) self.cfg_group = 'dvs' # group name for dvs section in nsx.ini self._dvs = dvs.SingleDvsManager() self.setup_dhcpmeta_access() @staticmethod def plugin_type(): return projectpluginmap.NsxPlugins.DVS @staticmethod def is_tvd_plugin(): return False @staticmethod def _extend_port_dict_binding(result, portdb): result[pbin.VIF_TYPE] = nsx_constants.VIF_TYPE_DVS port_attr = portdb.get('nsx_port_attributes') if port_attr: result[pbin.VNIC_TYPE] = port_attr.vnic_type else: result[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL result[pbin.VIF_DETAILS] = { # TODO(rkukura): Replace with new VIF security details # security-groups extension supported by this plugin pbin.CAP_PORT_FILTER: True} def _extend_get_network_dict_provider(self, context, network, multiprovider=None, bindings=None): if not bindings: bindings = nsx_db.get_network_bindings(context.session, network['id']) if not multiprovider: multiprovider = nsx_db.is_multiprovider_network(context.session, network['id']) # With NSX plugin 'normal' overlay networks will have no binding # TODO(salvatore-orlando) make sure users can specify a distinct # phy_uuid as 'provider network' for STT net type if bindings: if not multiprovider: # network came in through provider networks api network[pnet.NETWORK_TYPE] = bindings[0].binding_type network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id else: # network come in though multiprovider networks api network[mpnet_apidef.SEGMENTS] = [ {pnet.NETWORK_TYPE: binding.binding_type, pnet.PHYSICAL_NETWORK: 
binding.phy_uuid, pnet.SEGMENTATION_ID: binding.vlan_id} for binding in bindings] def _dvs_get_id(self, net_data): if net_data['name'] == '': return net_data['id'] else: # Maximum name length is 80 characters. 'id' length is 36 # maximum prefix for name is 43 return '%s-%s' % (net_data['name'][:43], net_data['id']) def _dvs_create_network(self, context, network): net_data = network['network'] if net_data['admin_state_up'] is False: LOG.warning("Network with admin_state_up=False are not yet " "supported by this plugin. Ignoring setting for " "network %s", net_data.get('name', '')) net_data['id'] = str(uuid.uuid4()) vlan_tag = 0 if net_data.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.VLAN: vlan_tag = net_data.get(pnet.SEGMENTATION_ID, 0) trunk_mode = False # vlan transparent can be an object if not set. if net_data.get(vlan_apidef.VLANTRANSPARENT) is True: trunk_mode = True net_id = None if net_data.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.PORTGROUP: net_id = net_data.get(pnet.PHYSICAL_NETWORK) pg_info = self._dvs.get_port_group_info(net_id) if pg_info.get('name') != net_data.get('name'): err_msg = (_("Portgroup name %(dvpg)s must match network " "name %(network)s") % {'dvpg': pg_info.get('name'), 'network': net_data.get('name')}) raise n_exc.InvalidInput(error_message=err_msg) dvpg_moref = self._dvs.net_id_to_moref(net_id) dvs_id = dvpg_moref.value else: dvs_id = self._dvs_get_id(net_data) try: self._dvs.add_port_group(dvs_id, vlan_tag, trunk_mode=trunk_mode) except dvs_utils.DvsOperationBulkFault: LOG.warning('One or more hosts may not be configured') try: with db_api.context_manager.writer.using(context): new_net = super(NsxDvsV2, self).create_network(context, network) self._extension_manager.process_create_network( context, net_data, new_net) # Process port security extension self._process_network_port_security_create( context, net_data, new_net) # Process vlan transparent extension net_db = self._get_network(context, new_net['id']) 
net_db['vlan_transparent'] = trunk_mode net_data['vlan_transparent'] = trunk_mode resource_extend.apply_funcs('networks', net_data, net_db) nsx_db.add_network_binding( context.session, new_net['id'], net_data.get(pnet.NETWORK_TYPE), net_id or 'dvs', vlan_tag) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to create network') if (net_data.get(pnet.NETWORK_TYPE) != c_utils.NetworkTypes.PORTGROUP): self._dvs.delete_port_group(dvs_id) new_net[pnet.NETWORK_TYPE] = net_data.get(pnet.NETWORK_TYPE) new_net[pnet.PHYSICAL_NETWORK] = net_id or 'dvs' new_net[pnet.SEGMENTATION_ID] = vlan_tag # this extra lookup is necessary to get the # latest db model for the extension functions net_model = self._get_network(context, net_data['id']) resource_extend.apply_funcs('networks', new_net, net_model) self.handle_network_dhcp_access(context, new_net, action='create_network') return new_net def _validate_network(self, context, net_data): network_type = net_data.get(pnet.NETWORK_TYPE) segmentation_id = net_data.get(pnet.SEGMENTATION_ID) segmentation_id_set = validators.is_attr_set(segmentation_id) if not context.is_admin: err_msg = _("Only an admin can create a DVS provider " "network") raise n_exc.InvalidInput(error_message=err_msg) err_msg = None if (network_type == c_utils.NetworkTypes.FLAT or network_type == c_utils.NetworkTypes.PORTGROUP): if segmentation_id_set: err_msg = (_("Segmentation ID cannot be specified with " "%s network type"), network_type) elif network_type == c_utils.NetworkTypes.VLAN: if not segmentation_id_set: err_msg = _("Segmentation ID must be specified with " "vlan network type") if (segmentation_id_set and not utils.is_valid_vlan_tag(segmentation_id)): err_msg = (_("%(segmentation_id)s out of range " "(%(min_id)s through %(max_id)s)") % {'segmentation_id': segmentation_id, 'min_id': constants.MIN_VLAN_TAG, 'max_id': constants.MAX_VLAN_TAG}) else: err_msg = (_("%(net_type_param)s %(net_type_value)s not " "supported") % 
{'net_type_param': pnet.NETWORK_TYPE, 'net_type_value': network_type}) if err_msg: raise n_exc.InvalidInput(error_message=err_msg) def create_network(self, context, network): self._validate_network(context, network['network']) return self._dvs_create_network(context, network) def _dvs_delete_network(self, context, id): network = self._get_network(context, id) dvs_id = self._dvs_get_id(network) bindings = nsx_db.get_network_bindings(context.session, id) with db_api.context_manager.writer.using(context): nsx_db.delete_network_bindings(context.session, id) super(NsxDvsV2, self).delete_network(context, id) try: if (not bindings or bindings[0].binding_type != c_utils.NetworkTypes.PORTGROUP): self._dvs.delete_port_group(dvs_id) except Exception: LOG.exception('Unable to delete DVS port group %s', id) self.handle_network_dhcp_access(context, id, action='delete_network') def delete_network(self, context, id): self._dvs_delete_network(context, id) def _dvs_get_network(self, context, id, fields=None): with db_api.context_manager.reader.using(context): # goto to the plugin DB and fetch the network network = self._get_network(context, id) # Don't do field selection here otherwise we won't be able # to add provider networks fields net_result = self._make_network_dict(network, context=context) self._extend_get_network_dict_provider(context, net_result) return db_utils.resource_fields(net_result, fields) def get_network(self, context, id, fields=None): return self._dvs_get_network(context, id, fields=None) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} with db_api.context_manager.reader.using(context): networks = ( super(NsxDvsV2, self).get_networks( context, filters, fields, sorts, limit, marker, page_reverse)) for net in networks: self._extend_get_network_dict_provider(context, net) return (networks if not fields else [db_utils.resource_fields(network, fields) for network in networks]) 
def update_network(self, context, id, network): net_attrs = network['network'] providernet._raise_if_updates_provider_attributes(net_attrs) with db_api.context_manager.writer.using(context): net_res = super(NsxDvsV2, self).update_network(context, id, network) self._extension_manager.process_update_network(context, net_attrs, net_res) # Process port security extension self._process_network_port_security_update( context, net_attrs, net_res) self._extend_get_network_dict_provider(context, net_res) return net_res def _process_vnic_type(self, context, port_data, port_id): vnic_type = port_data.get(pbin.VNIC_TYPE) if validators.is_attr_set(vnic_type): if (vnic_type != pbin.VNIC_NORMAL and vnic_type != pbin.VNIC_DIRECT and vnic_type != pbin.VNIC_DIRECT_PHYSICAL): err_msg = _("Only direct, direct-physical and normal VNIC " "types supported") raise n_exc.InvalidInput(error_message=err_msg) nsxv_db.update_nsxv_port_ext_attributes( session=context.session, port_id=port_id, vnic_type=vnic_type) def create_port(self, context, port): # If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED # then we pass the port to the policy engine. The reason why we don't # pass the value to the policy engine when the port is # ATTR_NOT_SPECIFIED is for the case where a port is created on a # shared network that is not owned by the tenant. 
port_data = port['port'] with db_api.context_manager.writer.using(context): # First we allocate port in neutron database neutron_db = super(NsxDvsV2, self).create_port(context, port) self._extension_manager.process_create_port( context, port_data, neutron_db) port_security = self._get_network_security_binding( context, neutron_db['network_id']) port_data[psec.PORTSECURITY] = port_security self._process_port_port_security_create( context, port_data, neutron_db) # Update fields obtained from neutron db (eg: MAC address) port["port"].update(neutron_db) has_ip = self._ip_on_port(neutron_db) # security group extension checks if has_ip: self._ensure_default_security_group_on_port(context, port) elif validators.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)): raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() port_data[ext_sg.SECURITYGROUPS] = ( self._get_security_groups_on_port(context, port)) self._process_port_create_security_group( context, port_data, port_data[ext_sg.SECURITYGROUPS]) self._process_portbindings_create_and_update(context, port['port'], port_data) # allowed address pair checks if validators.is_attr_set(port_data.get( addr_apidef.ADDRESS_PAIRS)): if not port_security: raise addr_exc.AddressPairAndPortSecurityRequired() else: self._process_create_allowed_address_pairs( context, neutron_db, port_data[addr_apidef.ADDRESS_PAIRS]) else: # remove ATTR_NOT_SPECIFIED port_data[addr_apidef.ADDRESS_PAIRS] = [] self._process_portbindings_create_and_update(context, port['port'], port_data) self._process_vnic_type(context, port_data, neutron_db['id']) LOG.debug("create_port completed on NSX for tenant " "%(tenant_id)s: (%(id)s)", port_data) # DB Operation is complete, perform DVS operation port_data = port['port'] # this extra lookup is necessary to get the # latest db model for the extension functions port_model = self._get_port(context, port_data['id']) resource_extend.apply_funcs('ports', port_data, port_model) self._extend_port_dict_binding(port_data, 
port_model) self.handle_port_dhcp_access(context, port_data, action='create_port') return port_data def update_port(self, context, id, port): delete_addr_pairs = self._check_update_deletes_allowed_address_pairs( port) has_addr_pairs = self._check_update_has_allowed_address_pairs(port) with db_api.context_manager.writer.using(context): ret_port = super(NsxDvsV2, self).update_port( context, id, port) # Save current mac learning state to check whether it's # being updated or not # copy values over - except fixed_ips as # they've already been processed port['port'].pop('fixed_ips', None) ret_port.update(port['port']) # populate port_security setting if psec.PORTSECURITY not in port['port']: ret_port[psec.PORTSECURITY] = self._get_port_security_binding( context, id) # validate port security and allowed address pairs if not ret_port[psec.PORTSECURITY]: # has address pairs in request if has_addr_pairs: raise addr_exc.AddressPairAndPortSecurityRequired() elif not delete_addr_pairs: # check if address pairs are in db ret_port[addr_apidef.ADDRESS_PAIRS] = ( self.get_allowed_address_pairs(context, id)) if ret_port[addr_apidef.ADDRESS_PAIRS]: raise addr_exc.AddressPairAndPortSecurityRequired() if delete_addr_pairs or has_addr_pairs: # delete address pairs and read them in self._delete_allowed_address_pairs(context, id) self._process_create_allowed_address_pairs( context, ret_port, ret_port[addr_apidef.ADDRESS_PAIRS]) if psec.PORTSECURITY in port['port']: self._process_port_port_security_update( context, port['port'], ret_port) self._process_vnic_type(context, port['port'], id) LOG.debug("Updating port: %s", port) self._extension_manager.process_update_port( context, port['port'], ret_port) self._process_portbindings_create_and_update(context, port['port'], ret_port) return ret_port def delete_port(self, context, id, l3_port_check=True, nw_gw_port_check=True): """Deletes a port on a specified Virtual Network. 
If the port contains a remote interface attachment, the remote interface is first un-plugged and then the port is deleted. :returns: None :raises: exception.PortInUse :raises: exception.PortNotFound :raises: exception.NetworkNotFound """ neutron_db_port = self.get_port(context, id) with db_api.context_manager.writer.using(context): # metadata_dhcp_host_route self.handle_port_metadata_access( context, neutron_db_port, is_delete=True) super(NsxDvsV2, self).delete_port(context, id) self.handle_port_dhcp_access( context, neutron_db_port, action='delete_port') def get_ports(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} with db_api.context_manager.reader.using(context): ports = ( super(NsxDvsV2, self).get_ports( context, filters, fields, sorts, limit, marker, page_reverse)) # Add port extensions for port in ports: if 'id' in port: port_model = self._get_port(context, port['id']) resource_extend.apply_funcs('ports', port, port_model) self._extend_port_dict_binding(port, port_model) return (ports if not fields else [db_utils.resource_fields(port, fields) for port in ports]) def get_port(self, context, id, fields=None): port = super(NsxDvsV2, self).get_port(context, id, fields=None) if 'id' in port: port_model = self._get_port(context, port['id']) resource_extend.apply_funcs('ports', port, port_model) self._extend_port_dict_binding(port, port_model) else: port[pbin.VIF_TYPE] = nsx_constants.VIF_TYPE_DVS return db_utils.resource_fields(port, fields) def create_router(self, context, router): # DVS backend cannot support logical router msg = (_("Unable to create router %s with DVS") % router['router']['name']) raise n_exc.BadRequest(resource="router", msg=msg) def get_network_availability_zones(self, net_db): """Api to comply with the NSX-TVD plugin""" return [] vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v3/0000775000175100017510000000000013244524600021020 5ustar 
zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v3/api_replay/0000775000175100017510000000000013244524600023145 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v3/api_replay/__init__.py0000666000175100017510000000000013244523345025253 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v3/utils.py0000666000175100017510000001271413244523345022546 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import random from oslo_config import cfg from oslo_log import log as logging from neutron import version as n_version from neutron_lib import context as q_context from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.plugins.nsx_v3 import cert_utils from vmware_nsxlib import v3 from vmware_nsxlib.v3 import client_cert from vmware_nsxlib.v3 import config NSX_NEUTRON_PLUGIN = 'NSX Neutron plugin' OS_NEUTRON_ID_SCOPE = 'os-neutron-id' LOG = logging.getLogger(__name__) class DbCertProvider(client_cert.ClientCertProvider): """Write cert data from DB to file and delete after use New provider object with random filename is created for each request. This is not most efficient, but the safest way to avoid race conditions, since backend connections can occur both before and after neutron fork, and several concurrent requests can occupy the same thread. 
Note that new cert filename for each request does not result in new connection for each request (at least for now..) """ EXPIRATION_ALERT_DAYS = 30 # days prior to expiration def __init__(self): super(DbCertProvider, self).__init__(None) random.seed() self._filename = '/tmp/.' + str(random.randint(1, 10000000)) def _check_expiration(self, expires_in_days): if expires_in_days > self.EXPIRATION_ALERT_DAYS: return if expires_in_days < 0: LOG.error("Client certificate has expired %d days ago.", expires_in_days * -1) else: LOG.warning("Client certificate expires in %d days. " "Once expired, service will become unavailable.", expires_in_days) def __enter__(self): try: context = q_context.get_admin_context() db_storage_driver = cert_utils.DbCertificateStorageDriver( context) with client_cert.ClientCertificateManager( cert_utils.NSX_OPENSTACK_IDENTITY, None, db_storage_driver) as cert_manager: if not cert_manager.exists(): msg = _("Unable to load from nsx-db") raise nsx_exc.ClientCertificateException(err_msg=msg) filename = self._filename if not os.path.exists(os.path.dirname(filename)): if len(os.path.dirname(filename)) > 0: os.makedirs(os.path.dirname(filename)) cert_manager.export_pem(filename) expires_in_days = cert_manager.expires_in_days() self._check_expiration(expires_in_days) except Exception as e: self._on_exit() raise e return self def _on_exit(self): if os.path.isfile(self._filename): os.remove(self._filename) self._filename = None def __exit__(self, type, value, traceback): self._on_exit() def filename(self): return self._filename def get_client_cert_provider(): if not cfg.CONF.nsx_v3.nsx_use_client_auth: return None if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() == 'none': # Admin is responsible for providing cert file, the plugin # should not touch it return client_cert.ClientCertProvider( cfg.CONF.nsx_v3.nsx_client_cert_file) if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() == 'nsx-db': # Cert data is stored in DB, and written to file system only # 
when new connection is opened, and deleted immediately after. return DbCertProvider def get_nsxlib_wrapper(nsx_username=None, nsx_password=None, basic_auth=False): client_cert_provider = None if not basic_auth: # if basic auth requested, dont use cert file even if provided client_cert_provider = get_client_cert_provider() nsxlib_config = config.NsxLibConfig( username=nsx_username or cfg.CONF.nsx_v3.nsx_api_user, password=nsx_password or cfg.CONF.nsx_v3.nsx_api_password, client_cert_provider=client_cert_provider, retries=cfg.CONF.nsx_v3.http_retries, insecure=cfg.CONF.nsx_v3.insecure, ca_file=cfg.CONF.nsx_v3.ca_file, concurrent_connections=cfg.CONF.nsx_v3.concurrent_connections, http_timeout=cfg.CONF.nsx_v3.http_timeout, http_read_timeout=cfg.CONF.nsx_v3.http_read_timeout, conn_idle_timeout=cfg.CONF.nsx_v3.conn_idle_timeout, http_provider=None, max_attempts=cfg.CONF.nsx_v3.retries, nsx_api_managers=cfg.CONF.nsx_v3.nsx_api_managers, plugin_scope=OS_NEUTRON_ID_SCOPE, plugin_tag=NSX_NEUTRON_PLUGIN, plugin_ver=n_version.version_info.release_string(), dns_nameservers=cfg.CONF.nsx_v3.nameservers, dns_domain=cfg.CONF.nsx_v3.dns_domain) return v3.NsxLib(nsxlib_config) vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v3/availability_zones.py0000666000175100017510000002305013244523345025271 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from vmware_nsx._i18n import _ from vmware_nsx.common import availability_zones as common_az from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsxlib.v3 import core_resources from vmware_nsxlib.v3 import nsx_constants as nsxlib_consts DEFAULT_NAME = common_az.DEFAULT_NAME + 'v3' class NsxV3AvailabilityZone(common_az.ConfiguredAvailabilityZone): def init_from_config_line(self, config_line): # Not supported for nsx_v3 (old configuration) raise nsx_exc.NsxInvalidConfiguration( opt_name="availability_zones", opt_value=config_line, reason=_("Expected a list of names")) def init_from_config_section(self, az_name): az_info = config.get_nsxv3_az_opts(self.name) if cfg.CONF.nsx_v3.native_dhcp_metadata: # The optional parameters will get the global values if not # defined for this AZ self.metadata_proxy = az_info.get('metadata_proxy') if not self.metadata_proxy: raise nsx_exc.NsxInvalidConfiguration( opt_name="metadata_proxy", opt_value='None', reason=(_("metadata_proxy for availability zone %s " "must be defined") % az_name)) self.dhcp_profile = az_info.get('dhcp_profile') if not self.dhcp_profile: raise nsx_exc.NsxInvalidConfiguration( opt_name="dhcp_profile", opt_value='None', reason=(_("dhcp_profile for availability zone %s " "must be defined") % az_name)) self.native_metadata_route = az_info.get('native_metadata_route') if self.native_metadata_route is None: nmr = cfg.CONF.nsx_v3.native_metadata_route self.native_metadata_route = nmr else: self.metadata_proxy = None self.dhcp_profile = None self.native_metadata_route = None self.dns_domain = az_info.get('dns_domain') if self.dns_domain is None: self.dns_domain = cfg.CONF.nsx_v3.dns_domain self.nameservers = az_info.get('nameservers') if self.nameservers is None: self.nameservers = cfg.CONF.nsx_v3.nameservers self.default_overlay_tz = az_info.get('default_overlay_tz') if self.default_overlay_tz is None: self.default_overlay_tz = 
cfg.CONF.nsx_v3.default_overlay_tz self.default_vlan_tz = az_info.get('default_vlan_tz') if self.default_vlan_tz is None: self.default_vlan_tz = cfg.CONF.nsx_v3.default_vlan_tz self.switching_profiles = az_info.get('switching_profiles') if self.switching_profiles is None: self.switching_profiles = cfg.CONF.nsx_v3.switching_profiles self.dhcp_relay_service = az_info.get('dhcp_relay_service') if self.dhcp_relay_service is None: self.dhcp_relay_service = cfg.CONF.nsx_v3.dhcp_relay_service def init_default_az(self): # use the default configuration self.metadata_proxy = cfg.CONF.nsx_v3.metadata_proxy self.dhcp_profile = cfg.CONF.nsx_v3.dhcp_profile self.native_metadata_route = cfg.CONF.nsx_v3.native_metadata_route self.dns_domain = cfg.CONF.nsx_v3.dns_domain self.nameservers = cfg.CONF.nsx_v3.nameservers self.default_overlay_tz = cfg.CONF.nsx_v3.default_overlay_tz self.default_vlan_tz = cfg.CONF.nsx_v3.default_vlan_tz self.switching_profiles = cfg.CONF.nsx_v3.switching_profiles self.dhcp_relay_service = cfg.CONF.nsx_v3.dhcp_relay_service def translate_configured_names_to_uuids(self, nsxlib): # Mandatory configurations (in AZ or inherited from global values) # Unless this is the default AZ, and metadata is disabled. 
if self.dhcp_profile: dhcp_id = None if cfg.CONF.nsx_v3.init_objects_by_tags: # Find the TZ by its tag dhcp_id = nsxlib.get_id_by_resource_and_tag( nsxlib.native_dhcp_profile.resource_type, cfg.CONF.nsx_v3.search_objects_scope, self.dhcp_profile) if not dhcp_id: dhcp_id = nsxlib.native_dhcp_profile.get_id_by_name_or_id( self.dhcp_profile) self._native_dhcp_profile_uuid = dhcp_id else: self._native_dhcp_profile_uuid = None if self.metadata_proxy: proxy_id = None if cfg.CONF.nsx_v3.init_objects_by_tags: # Find the TZ by its tag proxy_id = nsxlib.get_id_by_resource_and_tag( nsxlib.native_md_proxy.resource_type, cfg.CONF.nsx_v3.search_objects_scope, self.metadata_proxy) if not proxy_id: proxy_id = nsxlib.native_md_proxy.get_id_by_name_or_id( self.metadata_proxy) self._native_md_proxy_uuid = proxy_id else: self._native_md_proxy_uuid = None if self.default_overlay_tz: tz_id = None if cfg.CONF.nsx_v3.init_objects_by_tags: # Find the TZ by its tag resource_type = (nsxlib.transport_zone.resource_type + ' AND transport_type:OVERLAY') tz_id = nsxlib.get_id_by_resource_and_tag( resource_type, cfg.CONF.nsx_v3.search_objects_scope, self.default_overlay_tz) if not tz_id: # Find the TZ by its name or id tz_id = nsxlib.transport_zone.get_id_by_name_or_id( self.default_overlay_tz) self._default_overlay_tz_uuid = tz_id else: self._default_overlay_tz_uuid = None # Optional configurations (may be None) if self.default_vlan_tz: tz_id = None if cfg.CONF.nsx_v3.init_objects_by_tags: # Find the TZ by its tag resource_type = (nsxlib.transport_zone.resource_type + ' AND transport_type:VLAN') tz_id = nsxlib.get_id_by_resource_and_tag( resource_type, cfg.CONF.nsx_v3.search_objects_scope, self.default_vlan_tz) if not tz_id: # Find the TZ by its name or id tz_id = nsxlib.transport_zone.get_id_by_name_or_id( self.default_vlan_tz) self._default_vlan_tz_uuid = tz_id else: self._default_vlan_tz_uuid = None # switching profiles are already uuids, but we need to translate # those to objects profiles = 
[] if self.switching_profiles: for profile in self.switching_profiles: nsx_profile = nsxlib.switching_profile.get(profile) # TODO(asarfaty): skip or alert on unsupported types profiles.append(core_resources.SwitchingProfileTypeId( nsx_profile.get('resource_type'), nsx_profile.get('id'))) self.switching_profiles_objs = profiles if (self.dhcp_relay_service and nsxlib.feature_supported(nsxlib_consts.FEATURE_DHCP_RELAY)): relay_id = None if cfg.CONF.nsx_v3.init_objects_by_tags: # Find the relay service by its tag relay_id = nsxlib.get_id_by_resource_and_tag( nsxlib.relay_service.resource_type, cfg.CONF.nsx_v3.search_objects_scope, self.dhcp_relay_service) if not relay_id: # Find the service by its name or id relay_id = nsxlib.relay_service.get_id_by_name_or_id( self.dhcp_relay_service) self.dhcp_relay_service = relay_id # if there is a relay service - also find the server ips if self.dhcp_relay_service: self.dhcp_relay_servers = nsxlib.relay_service.get_server_ips( self.dhcp_relay_service) else: self.dhcp_relay_service = None self.dhcp_relay_servers = None class NsxV3AvailabilityZones(common_az.ConfiguredAvailabilityZones): default_name = DEFAULT_NAME def __init__(self, use_tvd_config=False): if use_tvd_config: default_azs = cfg.CONF.nsx_tvd.nsx_v3_default_availability_zones else: default_azs = cfg.CONF.default_availability_zones super(NsxV3AvailabilityZones, self).__init__( cfg.CONF.nsx_v3.availability_zones, NsxV3AvailabilityZone, default_availability_zones=default_azs) def dhcp_relay_configured(self): for az in self.availability_zones.values(): if az.dhcp_relay_service: return True return False vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v3/__init__.py0000666000175100017510000000000013244523345023126 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v3/cert_utils.py0000666000175100017510000000716013244523345023562 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import hashlib from cryptography import fernet from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.db import db as nsx_db LOG = logging.getLogger(__name__) NSX_OPENSTACK_IDENTITY = "com.vmware.nsx.openstack" # 32-byte base64-encoded secret for symmetric password encryption # generated on init based on password provided in configuration _SECRET = None def reset_secret(): global _SECRET _SECRET = None def generate_secret_from_password(password): m = hashlib.md5() m.update(password.encode('ascii')) return base64.b64encode(m.hexdigest().encode('ascii')) def symmetric_encrypt(secret, plaintext): if not isinstance(plaintext, bytes): plaintext = plaintext.encode('ascii') return fernet.Fernet(secret).encrypt(plaintext).decode('ascii') def symmetric_decrypt(secret, ciphertext): if not isinstance(ciphertext, bytes): ciphertext = ciphertext.encode('ascii') return fernet.Fernet(secret).decrypt(ciphertext).decode('ascii') class DbCertificateStorageDriver(object): """Storage for certificate and private key in neutron DB""" def __init__(self, context): global _SECRET self._context = context if cfg.CONF.nsx_v3.nsx_client_cert_pk_password and not _SECRET: _SECRET = generate_secret_from_password( cfg.CONF.nsx_v3.nsx_client_cert_pk_password) def store_cert(self, purpose, certificate, private_key): # encrypt private key if _SECRET: private_key = symmetric_encrypt(_SECRET, private_key) 
nsx_db.save_certificate(self._context.session, purpose, certificate, private_key) def get_cert(self, purpose): cert, private_key = nsx_db.get_certificate(self._context.session, purpose) if _SECRET and private_key: try: # Encrypted PK is stored in DB as string, while fernet expects # bytearray. private_key = symmetric_decrypt(_SECRET, private_key) except fernet.InvalidToken: # unable to decrypt - probably due to change of password # cert and PK are useless, need to delete them LOG.error("Unable to decrypt private key, possibly due " "to change of password. Certificate needs to be " "regenerated") self.delete_cert(purpose) return None, None return cert, private_key def delete_cert(self, purpose): return nsx_db.delete_certificate(self._context.session, purpose) class DummyCertificateStorageDriver(object): """Dummy driver API implementation Used for external certificate import scenario (nsx_client_cert_storage == None) """ def store_cert(self, purpose, certificate, private_key): pass def get_cert(self, purpose): return None, None def delete_cert(self, purpose): pass vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v3/plugin.py0000666000175100017510000066447313244523413022717 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import port_security as psec from neutron_lib.api import faults from neutron_lib.api.validators import availability_zone as az_validator from neutron_lib.exceptions import allowedaddresspairs as addr_exc from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.exceptions import port_security as psec_exc from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import metadata_rpc from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils as nc_utils from neutron.db import _resource_extend as resource_extend from neutron.db import _utils as db_utils from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import api as db_api from neutron.db.availability_zone import router as router_az_db from neutron.db import db_base_plugin_v2 from neutron.db import dns_db from neutron.db import external_net_db from neutron.db import extradhcpopt_db from neutron.db import extraroute_db from neutron.db import l3_attrs_db from neutron.db import l3_db from neutron.db import l3_gwmode_db from neutron.db.models import l3 as l3_db_models from neutron.db.models import securitygroup as securitygroup_model # noqa from neutron.db import models_v2 from neutron.db import portbindings_db from neutron.db import portsecurity_db from neutron.db import securitygroups_db from neutron.db import 
vlantransparent_db from neutron.extensions import providernet from neutron.extensions import securitygroup as ext_sg from neutron.plugins.common import utils as n_utils from neutron.quota import resource_registry from neutron_lib.api.definitions import extra_dhcp_opt as ext_edo from neutron_lib.api.definitions import portbindings as pbin from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api.definitions import vlantransparent as vlan_apidef from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import exceptions as callback_exc from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as const from neutron_lib import context as q_context from neutron_lib import exceptions as n_exc from neutron_lib.utils import helpers from neutron_lib.utils import net as nlib_net from oslo_config import cfg from oslo_context import context as context_utils from oslo_db import exception as db_exc from oslo_log import log from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import uuidutils from sqlalchemy import exc as sql_exc import webob.exc from vmware_nsx._i18n import _ from vmware_nsx.api_replay import utils as api_replay_utils from vmware_nsx.common import availability_zones as nsx_com_az from vmware_nsx.common import config # noqa from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import l3_rpc_agent_api from vmware_nsx.common import locking from vmware_nsx.common import managers from vmware_nsx.common import utils from vmware_nsx.db import db as nsx_db from vmware_nsx.db import extended_security_group from vmware_nsx.db import extended_security_group_rule as extend_sg_rule from vmware_nsx.db import maclearning as mac_db from vmware_nsx.dhcp_meta import rpc as nsx_rpc from vmware_nsx.extensions import advancedserviceproviders as as_providers from vmware_nsx.extensions import 
maclearning as mac_ext from vmware_nsx.extensions import projectpluginmap from vmware_nsx.extensions import providersecuritygroup as provider_sg from vmware_nsx.extensions import securitygrouplogging as sg_logging from vmware_nsx.plugins.common import plugin as nsx_plugin_common from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.plugins.nsx_v3 import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v3 import utils as v3_utils from vmware_nsx.services.fwaas.common import utils as fwaas_utils from vmware_nsx.services.fwaas.nsx_v3 import fwaas_callbacks_v1 from vmware_nsx.services.fwaas.nsx_v3 import fwaas_callbacks_v2 from vmware_nsx.services.lbaas.nsx_v3 import lb_driver_v2 from vmware_nsx.services.qos.common import utils as qos_com_utils from vmware_nsx.services.qos.nsx_v3 import driver as qos_driver from vmware_nsx.services.trunk.nsx_v3 import driver as trunk_driver from vmware_nsxlib.v3 import core_resources as nsx_resources from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants as nsxlib_consts from vmware_nsxlib.v3 import security from vmware_nsxlib.v3 import utils as nsxlib_utils LOG = log.getLogger(__name__) NSX_V3_PSEC_PROFILE_NAME = 'neutron_port_spoof_guard_profile' NSX_V3_NO_PSEC_PROFILE_NAME = 'nsx-default-spoof-guard-vif-profile' NSX_V3_DHCP_PROFILE_NAME = 'neutron_port_dhcp_profile' NSX_V3_MAC_LEARNING_PROFILE_NAME = 'neutron_port_mac_learning_profile' NSX_V3_FW_DEFAULT_SECTION = 'OS Default Section for Neutron Security-Groups' NSX_V3_FW_DEFAULT_NS_GROUP = 'os_default_section_ns_group' NSX_V3_DEFAULT_SECTION = 'OS-Default-Section' NSX_V3_EXCLUDED_PORT_NSGROUP_NAME = 'neutron_excluded_port_nsgroup' NSX_V3_NON_VIF_PROFILE = 'nsx-default-switch-security-non-vif-profile' NSX_V3_SERVER_SSL_PROFILE = 'nsx-default-server-ssl-profile' NSX_V3_CLIENT_SSL_PROFILE = 'nsx-default-client-ssl-profile' def inject_headers(): ctx = context_utils.get_current() if ctx: ctx_dict = ctx.to_dict() return 
{'X-NSX-EUSER': ctx_dict.get('user_identity'), 'X-NSX-EREQID': ctx_dict.get('request_id')} return {} def inject_requestid_header(): ctx = context_utils.get_current() if ctx: return {'X-NSX-EREQID': ctx.__dict__.get('request_id')} return {} # NOTE(asarfaty): the order of inheritance here is important. in order for the # QoS notification to work, the AgentScheduler init must be called first # NOTE(arosen): same is true with the ExtendedSecurityGroupPropertiesMixin # this needs to be above securitygroups_db.SecurityGroupDbMixin. # FIXME(arosen): we can solve this inheritance order issue by just mixining in # the classes into a new class to handle the order correctly. @resource_extend.has_resource_extenders class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin, extended_security_group.ExtendedSecurityGroupPropertiesMixin, addr_pair_db.AllowedAddressPairsMixin, nsx_plugin_common.NsxPluginBase, extend_sg_rule.ExtendedSecurityGroupRuleMixin, securitygroups_db.SecurityGroupDbMixin, external_net_db.External_net_db_mixin, extraroute_db.ExtraRoute_db_mixin, router_az_db.RouterAvailabilityZoneMixin, l3_gwmode_db.L3_NAT_db_mixin, portbindings_db.PortBindingMixin, portsecurity_db.PortSecurityDbMixin, extradhcpopt_db.ExtraDhcpOptMixin, dns_db.DNSDbMixin, vlantransparent_db.Vlantransparent_db_mixin, mac_db.MacLearningDbMixin, nsx_com_az.NSXAvailabilityZonesPluginCommon, l3_attrs_db.ExtraAttributesMixin): __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True supported_extension_aliases = ["allowed-address-pairs", "address-scope", "quotas", "binding", "extra_dhcp_opt", "agent", "dhcp_agent_scheduler", "ext-gw-mode", "security-group", "secgroup-rule-local-ip-prefix", "port-security", "provider", "external-net", "extraroute", "router", "availability_zone", "network_availability_zone", "router_availability_zone", "subnet_allocation", "security-group-logging", "provider-security-group"] @resource_registry.tracked_resources( 
        network=models_v2.Network,
        port=models_v2.Port,
        subnet=models_v2.Subnet,
        subnetpool=models_v2.SubnetPool,
        security_group=securitygroup_model.SecurityGroup,
        security_group_rule=securitygroup_model.SecurityGroupRule,
        router=l3_db_models.Router,
        floatingip=l3_db_models.FloatingIP)
    def __init__(self):
        """Initialize the NSX-v3 core plugin.

        Sets up the extension manager, nsxlib backend wrapper, LBaaS
        driver, availability zones, backend profiles/NSGroups and DHCP or
        metadata handling. Raises NsxPluginException when a mandatory
        backend object cannot be initialized.
        """
        self.fwaas_callbacks = None
        self._is_sub_plugin = tvd_utils.is_tvd_core_plugin()
        self.init_is_complete = False
        nsxlib_utils.set_is_attr_callback(validators.is_attr_set)
        self._extend_fault_map()
        if self._is_sub_plugin:
            extension_drivers = cfg.CONF.nsx_tvd.nsx_v3_extension_drivers
        else:
            extension_drivers = cfg.CONF.nsx_extension_drivers
        self._extension_manager = managers.ExtensionManager(
            extension_drivers=extension_drivers)
        super(NsxV3Plugin, self).__init__()
        # Bind the dummy L3 notifications
        self.l3_rpc_notifier = l3_rpc_agent_api.L3NotifyAPI()
        LOG.info("Starting NsxV3Plugin")
        self._extension_manager.initialize()
        self.supported_extension_aliases.extend(
            self._extension_manager.extension_aliases())
        self.nsxlib = v3_utils.get_nsxlib_wrapper()
        # Choose which request headers are injected into backend calls,
        # depending on backend "on behalf of" support.
        if self.nsxlib.feature_supported(nsxlib_consts.FEATURE_ON_BEHALF_OF):
            nsxlib_utils.set_inject_headers_callback(inject_headers)
        else:
            nsxlib_utils.set_inject_headers_callback(inject_requestid_header)
        self.lbv2_driver = self._init_lbv2_driver()

        registry.subscribe(
            self.on_subnetpool_address_scope_updated,
            resources.SUBNETPOOL_ADDRESS_SCOPE,
            events.AFTER_UPDATE)

        self._nsx_version = self.nsxlib.get_version()
        LOG.info("NSX Version: %s", self._nsx_version)

        self.cfg_group = 'nsx_v3'  # group name for nsx_v3 section in nsx.ini
        self.tier0_groups_dict = {}

        # Initialize the network availability zones, which will be used only
        # when native_dhcp_metadata is True
        self.init_availability_zones()

        # Translate configured transport zones, routers, dhcp profile and
        # metadata proxy names to uuid.
        self._translate_configured_names_to_uuids()
        self._init_dhcp_metadata()

        # Include default section NSGroup
        LOG.debug("Initializing NSX v3 default section NSGroup")
        self._default_section_nsgroup = None
        self._default_section_nsgroup = self._init_default_section_nsgroup()
        if not self._default_section_nsgroup:
            msg = _("Unable to initialize NSX v3 default section NSGroup %s"
                    ) % NSX_V3_FW_DEFAULT_NS_GROUP
            raise nsx_exc.NsxPluginException(err_msg=msg)
        self.default_section = self._init_default_section_rules()
        self._process_security_group_logging()
        # init profiles on nsx backend
        self._init_nsx_profiles()

        # Include exclude NSGroup
        LOG.debug("Initializing NSX v3 Excluded Port NSGroup")
        self._excluded_port_nsgroup = None
        self._excluded_port_nsgroup = self._init_excluded_port_nsgroup()
        if not self._excluded_port_nsgroup:
            msg = _("Unable to initialize NSX v3 Excluded Port NSGroup %s"
                    ) % NSX_V3_EXCLUDED_PORT_NSGROUP_NAME
            raise nsx_exc.NsxPluginException(err_msg=msg)

        qos_driver.register()

        self.start_rpc_listeners_called = False

        self._unsubscribe_callback_events()
        if cfg.CONF.api_replay_mode:
            self.supported_extension_aliases.append('api-replay')

        # Support transparent VLANS from 2.2.0 onwards. The feature is only
        # supported if the global configuration flag vlan_transparent is
        # True
        if cfg.CONF.vlan_transparent:
            if self.nsxlib.feature_supported(nsxlib_consts.FEATURE_TRUNK_VLAN):
                self.supported_extension_aliases.append("vlan-transparent")
            else:
                raise NotImplementedError(
                    _("Current NSX version %s doesn't support "
                      "transparent vlans") % self.nsxlib.get_version())

        # Register NSXv3 trunk driver to support trunk extensions
        self.trunk_driver = trunk_driver.NsxV3TrunkDriver.create(self)

        # subscribe the init complete method last, so it will be called only
        # if init was successful
        registry.subscribe(self.init_complete,
                           resources.PROCESS,
                           events.AFTER_INIT)

    @staticmethod
    def plugin_type():
        # Plugin identity used by the TVD project-plugin mapping.
        return projectpluginmap.NsxPlugins.NSX_T

    @staticmethod
    def is_tvd_plugin():
        return False

    def init_complete(self, resource, event, trigger, payload=None):
        # Callback invoked AFTER_INIT per worker process; guarded by a lock
        # and the init_is_complete flag so it runs at most once per worker.
        with locking.LockManager.get_lock('plugin-init-complete'):
            if self.init_is_complete:
                # Should be called only once per worker
                return
            # reinitialize the cluster upon fork for api workers to ensure
            # each process has its own keepalive loops + state
            self.nsxlib.reinitialize_cluster(resource, event, trigger,
                                             payload=payload)

            # Init the FWaaS support
            self._init_fwaas()

            self.init_is_complete = True

    def _extend_fault_map(self):
        """Extends the Neutron Fault Map.

        Exceptions specific to the NSX Plugin are mapped to standard
        HTTP Exceptions.
        """
        faults.FAULT_MAP.update({nsx_lib_exc.ManagerError:
                                 webob.exc.HTTPBadRequest,
                                 nsx_lib_exc.ServiceClusterUnavailable:
                                 webob.exc.HTTPServiceUnavailable,
                                 nsx_lib_exc.ClientCertificateNotTrusted:
                                 webob.exc.HTTPBadRequest,
                                 nsx_exc.SecurityGroupMaximumCapacityReached:
                                 webob.exc.HTTPBadRequest,
                                 nsx_lib_exc.NsxLibInvalidInput:
                                 webob.exc.HTTPBadRequest,
                                 nsx_exc.NsxENSPortSecurity:
                                 webob.exc.HTTPBadRequest,
                                 })

    def _init_fwaas(self):
        # Attach FWaaS v1/v2 callbacks when the corresponding service
        # plugin is enabled. NOTE(review): if both v1 and v2 are enabled,
        # the v2 callbacks overwrite the v1 ones — presumably intentional;
        # confirm against the FWaaS integration docs.
        if fwaas_utils.is_fwaas_v1_plugin_enabled():
            LOG.info("NSXv3 FWaaS v1 plugin enabled")
            self.fwaas_callbacks = fwaas_callbacks_v1.Nsxv3FwaasCallbacksV1()
        if fwaas_utils.is_fwaas_v2_plugin_enabled():
            LOG.info("NSXv3 FWaaS v2 plugin enabled")
            self.fwaas_callbacks = fwaas_callbacks_v2.Nsxv3FwaasCallbacksV2()

    def _init_lbv2_driver(self):
        # Get LBaaSv2 driver during plugin initialization. If the platform
        # has a version that doesn't support native loadbalancing, the driver
        # will return a NotImplementedManager class.
        LOG.debug("Initializing LBaaSv2.0 nsxv3 driver")
        if self.nsxlib.feature_supported(nsxlib_consts.FEATURE_LOAD_BALANCER):
            return lb_driver_v2.EdgeLoadbalancerDriverV2()
        else:
            LOG.warning("Current NSX version %(ver)s doesn't support LBaaS",
                        {'ver': self.nsxlib.get_version()})
            return lb_driver_v2.DummyLoadbalancerDriverV2()

    def init_availability_zones(self):
        # Load the configured NSX-v3 availability zones (TVD-aware).
        self._availability_zones_data = nsx_az.NsxV3AvailabilityZones(
            use_tvd_config=self._is_sub_plugin)

    def _init_nsx_profiles(self):
        # Ensure the backend switching profiles this plugin relies on exist:
        # spoofguard (port security), no-port-security, DHCP security and,
        # version permitting, MAC-learning and LB SSL profiles.
        LOG.debug("Initializing NSX v3 port spoofguard switching profile")
        if not self._init_port_security_profile():
            msg = _("Unable to initialize NSX v3 port spoofguard "
                    "switching profile: %s") % NSX_V3_PSEC_PROFILE_NAME
            raise nsx_exc.NsxPluginException(err_msg=msg)
        profile_client = self.nsxlib.switching_profile
        no_psec_prof = profile_client.find_by_display_name(
            NSX_V3_NO_PSEC_PROFILE_NAME)[0]
        self._no_psec_profile_id = profile_client.build_switch_profile_ids(
            profile_client, no_psec_prof)[0]

        LOG.debug("Initializing NSX v3 DHCP switching profile")
        try:
            self._init_dhcp_switching_profile()
        except Exception as e:
            msg = (_("Unable to initialize NSX v3 DHCP switching profile: "
                     "%(id)s. Reason: %(reason)s") % {
                'id': NSX_V3_DHCP_PROFILE_NAME,
                'reason': str(e)})
            raise nsx_exc.NsxPluginException(err_msg=msg)

        self._mac_learning_profile = None
        # Only create MAC Learning profile when nsxv3 version >= 1.1.0
        if self.nsxlib.feature_supported(nsxlib_consts.FEATURE_MAC_LEARNING):
            LOG.debug("Initializing NSX v3 Mac Learning switching profile")
            try:
                self._init_mac_learning_profile()
                # Only expose the extension if it is supported
                self.supported_extension_aliases.append('mac-learning')
            except Exception as e:
                # Best effort: MAC learning stays disabled on failure.
                LOG.warning("Unable to initialize NSX v3 MAC Learning "
                            "profile: %(name)s. Reason: %(reason)s",
                            {'name': NSX_V3_MAC_LEARNING_PROFILE_NAME,
                             'reason': e})
        no_switch_security_prof = profile_client.find_by_display_name(
            NSX_V3_NON_VIF_PROFILE)[0]
        self._no_switch_security = profile_client.build_switch_profile_ids(
            profile_client, no_switch_security_prof)[0]
        self.server_ssl_profile = None
        self.client_ssl_profile = None
        # Only create LB profiles when nsxv3 version >= 2.1.0
        if self.nsxlib.feature_supported(nsxlib_consts.FEATURE_LOAD_BALANCER):
            LOG.debug("Initializing NSX v3 Load Balancer default profiles")
            try:
                self._init_lb_profiles()
            except Exception as e:
                msg = (_("Unable to initialize NSX v3 lb profiles: "
                         "Reason: %(reason)s") % {'reason': str(e)})
                raise nsx_exc.NsxPluginException(err_msg=msg)

    def _translate_configured_names_to_uuids(self):
        # Resolve configured object names/tags (tier0 router, DHCP profile,
        # metadata proxy, transport zones) into backend UUIDs.
        # If using tags to find the objects, make sure tag scope is configured
        if (cfg.CONF.nsx_v3.init_objects_by_tags and
            not cfg.CONF.nsx_v3.search_objects_scope):
            raise cfg.RequiredOptError("search_objects_scope",
                                       group=cfg.OptGroup('nsx_v3'))

        # default tier0 router
        self._default_tier0_router = None
        if cfg.CONF.nsx_v3.default_tier0_router:
            rtr_id = None
            if cfg.CONF.nsx_v3.init_objects_by_tags:
                # Find the router by its tag
                resource_type = (self.nsxlib.logical_router.resource_type +
                                 ' AND router_type:TIER0')
                rtr_id = self.nsxlib.get_id_by_resource_and_tag(
                    resource_type,
                    cfg.CONF.nsx_v3.search_objects_scope,
                    cfg.CONF.nsx_v3.default_tier0_router)
            if not rtr_id:
                # find the router by name or id
                rtr_id = self.nsxlib.logical_router.get_id_by_name_or_id(
                    cfg.CONF.nsx_v3.default_tier0_router)
            self._default_tier0_router = rtr_id

        # Validate and translate native dhcp profiles per az
        if cfg.CONF.nsx_v3.native_dhcp_metadata:
            if not cfg.CONF.nsx_v3.dhcp_profile:
                raise cfg.RequiredOptError("dhcp_profile",
                                           group=cfg.OptGroup('nsx_v3'))

            if not cfg.CONF.nsx_v3.metadata_proxy:
                raise cfg.RequiredOptError("metadata_proxy",
                                           group=cfg.OptGroup('nsx_v3'))

        # Translate all the uuids in each of the availability zones
        for az in self.get_azs_list():
            az.translate_configured_names_to_uuids(self.nsxlib)

    def _extend_nsx_port_dict_binding(self, context, port_data):
        # Populate the portbindings extension fields on a port dict.
        # Not using the register api for this because we need the context
        port_data[pbin.VIF_TYPE] = pbin.VIF_TYPE_OVS
        port_data[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL
        if 'network_id' in port_data:
            port_data[pbin.VIF_DETAILS] = {
                # TODO(rkukura): Replace with new VIF security details
                pbin.CAP_PORT_FILTER:
                'security-group' in self.supported_extension_aliases,
                'nsx-logical-switch-id':
                self._get_network_nsx_id(context, port_data['network_id'])}

    @nsxlib_utils.retry_upon_exception(
        Exception, max_attempts=cfg.CONF.nsx_v3.retries)
    def _init_default_section_nsgroup(self):
        # Get-or-create the NSGroup backing the default firewall section,
        # serialized across workers by a named lock and retried on failure.
        with locking.LockManager.get_lock('nsxv3_init_default_nsgroup'):
            nsgroup = self._get_default_section_nsgroup()
            if not nsgroup:
                # Create a new NSGroup for default section
                membership_criteria = (
                    self.nsxlib.ns_group.get_port_tag_expression(
                        security.PORT_SG_SCOPE, NSX_V3_DEFAULT_SECTION))
                nsgroup = self.nsxlib.ns_group.create(
                    NSX_V3_FW_DEFAULT_NS_GROUP,
                    'OS Default Section Port NSGroup',
                    tags=self.nsxlib.build_v3_api_version_tag(),
                    membership_criteria=membership_criteria)
            return self._get_default_section_nsgroup()

    def _get_default_section_nsgroup(self):
        if
           self._default_section_nsgroup:
            return self._default_section_nsgroup
        nsgroups = self.nsxlib.ns_group.find_by_display_name(
            NSX_V3_FW_DEFAULT_NS_GROUP)
        return nsgroups[0] if nsgroups else None

    @nsxlib_utils.retry_upon_exception(
        Exception, max_attempts=cfg.CONF.nsx_v3.retries)
    def _init_excluded_port_nsgroup(self):
        # Get-or-create the NSGroup whose members bypass the backend
        # firewall (ports with port-security disabled).
        with locking.LockManager.get_lock('nsxv3_excluded_port_nsgroup_init'):
            nsgroup = self._get_excluded_port_nsgroup()
            if not nsgroup:
                # Create a new NSGroup for excluded ports.
                membership_criteria = (
                    self.nsxlib.ns_group.get_port_tag_expression(
                        security.PORT_SG_SCOPE, nsxlib_consts.EXCLUDE_PORT))
                nsgroup = self.nsxlib.ns_group.create(
                    NSX_V3_EXCLUDED_PORT_NSGROUP_NAME,
                    'Neutron Excluded Port NSGroup',
                    tags=self.nsxlib.build_v3_api_version_tag(),
                    membership_criteria=membership_criteria)
                # Add this NSGroup to NSX Exclusion List.
                self.nsxlib.firewall_section.add_member_to_fw_exclude_list(
                    nsgroup['id'], nsxlib_consts.NSGROUP)
            return self._get_excluded_port_nsgroup()

    def _get_excluded_port_nsgroup(self):
        # Cached lookup of the excluded-port NSGroup by display name.
        if self._excluded_port_nsgroup:
            return self._excluded_port_nsgroup
        nsgroups = self.nsxlib.ns_group.find_by_display_name(
            NSX_V3_EXCLUDED_PORT_NSGROUP_NAME)
        return nsgroups[0] if nsgroups else None

    def _unsubscribe_callback_events(self):
        # l3_db explicitly subscribes to the port delete callback. This
        # callback is unsubscribed here since l3 APIs are handled by
        # core_plugin instead of an advanced service, in case of NSXv3 plugin,
        # and the prevention logic is handled by NSXv3 plugin itself.
        registry.unsubscribe(
            l3_db.L3_NAT_dbonly_mixin._prevent_l3_port_delete_callback,
            resources.PORT,
            events.BEFORE_DELETE)

    def _validate_dhcp_profile(self, dhcp_profile_uuid):
        # Sanity-check a backend DHCP switching profile: it must be a
        # 'Switch Security' profile with DHCP server/client block disabled.
        # Raises InvalidInput otherwise.
        dhcp_profile = self.nsxlib.switching_profile.get(dhcp_profile_uuid)
        if (dhcp_profile.get('resource_type') !=
            nsx_resources.SwitchingProfileTypes.SWITCH_SECURITY):
            msg = _("Invalid configuration on the backend for DHCP "
                    "switching profile %s. Switching Profile must be of type "
                    "'Switch Security'") % dhcp_profile_uuid
            raise n_exc.InvalidInput(error_message=msg)
        dhcp_filter = dhcp_profile.get('dhcp_filter')
        if (not dhcp_filter or dhcp_filter.get('client_block_enabled') or
            dhcp_filter.get('server_block_enabled')):
            msg = _("Invalid configuration on the backend for DHCP "
                    "switching profile %s. DHCP Server Block and Client Block "
                    "must be disabled") % dhcp_profile_uuid
            raise n_exc.InvalidInput(error_message=msg)

    @nsxlib_utils.retry_upon_exception(
        Exception, max_attempts=cfg.CONF.nsx_v3.retries)
    def _init_dhcp_switching_profile(self):
        # Get-or-create the Neutron DHCP security switching profile.
        with locking.LockManager.get_lock('nsxv3_dhcp_profile_init'):
            if not self._get_dhcp_security_profile():
                self.nsxlib.switching_profile.create_dhcp_profile(
                    NSX_V3_DHCP_PROFILE_NAME, 'Neutron DHCP Security Profile',
                    tags=self.nsxlib.build_v3_api_version_tag())
            return self._get_dhcp_security_profile()

    def _get_dhcp_security_profile(self):
        # Cached lookup; None when the profile is missing on the backend.
        if hasattr(self, '_dhcp_profile') and self._dhcp_profile:
            return self._dhcp_profile
        profile = self.nsxlib.switching_profile.find_by_display_name(
            NSX_V3_DHCP_PROFILE_NAME)
        self._dhcp_profile = nsx_resources.SwitchingProfileTypeId(
            profile_type=(nsx_resources.SwitchingProfileTypes.
                          SWITCH_SECURITY),
            profile_id=profile[0]['id']) if profile else None
        return self._dhcp_profile

    def _init_mac_learning_profile(self):
        # Get-or-create the MAC learning switching profile.
        with locking.LockManager.get_lock('nsxv3_mac_learning_profile_init'):
            if not self._get_mac_learning_profile():
                self.nsxlib.switching_profile.create_mac_learning_profile(
                    NSX_V3_MAC_LEARNING_PROFILE_NAME,
                    'Neutron MAC Learning Profile',
                    tags=self.nsxlib.build_v3_api_version_tag())
            return self._get_mac_learning_profile()

    def _get_mac_learning_profile(self):
        # Cached lookup; None when the profile is missing on the backend.
        if (hasattr(self, '_mac_learning_profile') and
            self._mac_learning_profile):
            return self._mac_learning_profile
        profile = self.nsxlib.switching_profile.find_by_display_name(
            NSX_V3_MAC_LEARNING_PROFILE_NAME)
        self._mac_learning_profile = nsx_resources.SwitchingProfileTypeId(
            profile_type=(nsx_resources.SwitchingProfileTypes.
                          MAC_LEARNING),
            profile_id=profile[0]['id']) if profile else None
        return self._mac_learning_profile

    def _init_lb_profiles(self):
        # Ensure the default LB client/server SSL profiles exist.
        with locking.LockManager.get_lock('nsxv3_lb_profiles_init'):
            lb_profiles = self._get_lb_profiles()
            if not lb_profiles.get('client_ssl_profile'):
                self.nsxlib.load_balancer.client_ssl_profile.create(
                    NSX_V3_CLIENT_SSL_PROFILE, 'Neutron LB Client SSL Profile',
                    tags=self.nsxlib.build_v3_api_version_tag())
            if not lb_profiles.get('server_ssl_profile'):
                self.nsxlib.load_balancer.server_ssl_profile.create(
                    NSX_V3_SERVER_SSL_PROFILE, 'Neutron LB Server SSL Profile',
                    tags=self.nsxlib.build_v3_api_version_tag())

    def _get_lb_profiles(self):
        # Cached lookup of the LB SSL profile ids; values are None when
        # the profiles are missing on the backend.
        if not self.client_ssl_profile:
            ssl_profile_client = self.nsxlib.load_balancer.client_ssl_profile
            profile = ssl_profile_client.find_by_display_name(
                NSX_V3_CLIENT_SSL_PROFILE)
            self.client_ssl_profile = profile[0]['id'] if profile else None
        if not self.server_ssl_profile:
            ssl_profile_client = self.nsxlib.load_balancer.server_ssl_profile
            profile = ssl_profile_client.find_by_display_name(
                NSX_V3_SERVER_SSL_PROFILE)
            self.server_ssl_profile = profile[0]['id'] if profile else None

        return {'client_ssl_profile':
                self.client_ssl_profile,
                'server_ssl_profile': self.server_ssl_profile}

    def _get_port_security_profile_id(self):
        return self.nsxlib.switching_profile.build_switch_profile_ids(
            self.nsxlib.switching_profile, self._psec_profile)[0]

    def _get_port_security_profile(self):
        # Cached lookup of the spoofguard (port security) profile.
        if hasattr(self, '_psec_profile') and self._psec_profile:
            return self._psec_profile
        profile = self.nsxlib.switching_profile.find_by_display_name(
            NSX_V3_PSEC_PROFILE_NAME)
        self._psec_profile = profile[0] if profile else None
        return self._psec_profile

    @nsxlib_utils.retry_upon_exception(
        Exception, max_attempts=cfg.CONF.nsx_v3.retries)
    def _init_port_security_profile(self):
        # Get-or-create the spoofguard profile.
        profile = self._get_port_security_profile()
        if profile:
            return profile

        with locking.LockManager.get_lock('nsxv3_psec_profile_init'):
            # NOTE(boden): double-checked locking pattern
            profile = self._get_port_security_profile()
            if profile:
                return profile

            self.nsxlib.switching_profile.create_spoofguard_profile(
                NSX_V3_PSEC_PROFILE_NAME, 'Neutron Port Security Profile',
                whitelist_ports=True, whitelist_switches=False,
                tags=self.nsxlib.build_v3_api_version_tag())
        return self._get_port_security_profile()

    def _process_security_group_logging(self):
        # Spawn a background task that aligns backend firewall rule logging
        # for non-logged security groups with the configured global flag.
        def process_security_group_logging(*args, **kwargs):
            context = q_context.get_admin_context()
            log_all_rules = cfg.CONF.nsx_v3.log_security_groups_allowed_traffic
            secgroups = self.get_security_groups(context,
                                                 fields=['id',
                                                 sg_logging.LOGGING])
            for sg in [sg for sg in secgroups
                       if sg.get(sg_logging.LOGGING) is False]:
                nsgroup_id, section_id = nsx_db.get_sg_mappings(
                    context.session, sg['id'])
                if section_id:
                    try:
                        self.nsxlib.firewall_section.set_rule_logging(
                            section_id, logging=log_all_rules)
                    except nsx_lib_exc.ManagerError:
                        # Best effort: log and continue with the next group.
                        LOG.error("Failed to update firewall rule logging "
                                  "for rule in section %s", section_id)
        utils.spawn_n(process_security_group_logging)

    def _init_default_section_rules(self):
        # Create/update the default firewall section and return its id.
        with locking.LockManager.get_lock('nsxv3_default_section'):
            section_description = ("This section is handled by OpenStack to "
                                   "contain default rules on security-groups.")
            section_id = self.nsxlib.firewall_section.init_default(
                NSX_V3_FW_DEFAULT_SECTION, section_description,
                [self._default_section_nsgroup.get('id')],
                cfg.CONF.nsx_v3.log_security_groups_blocked_traffic)
            return section_id

    def _init_dhcp_metadata(self):
        # Choose native (backend) DHCP/metadata or agent-based DHCP,
        # based on configuration; the two modes are mutually exclusive.
        if cfg.CONF.nsx_v3.native_dhcp_metadata:
            if cfg.CONF.dhcp_agent_notification:
                msg = _("Need to disable dhcp_agent_notification when "
                        "native_dhcp_metadata is enabled")
                raise nsx_exc.NsxPluginException(err_msg=msg)
            self._init_native_dhcp()
            self._init_native_metadata()
        else:
            self._setup_dhcp()
            self._start_rpc_notifiers()

    def _init_native_dhcp(self):
        # Verify every AZ's DHCP profile exists on the backend.
        # NOTE(review): the handler references 'az' from the loop variable;
        # if get_azs_list() itself raises ManagerError before the loop binds
        # it, this would be unbound — confirm get_azs_list cannot raise that.
        try:
            for az in self.get_azs_list():
                self.nsxlib.native_dhcp_profile.get(
                    az._native_dhcp_profile_uuid)
        except nsx_lib_exc.ManagerError:
            with excutils.save_and_reraise_exception():
                LOG.error("Unable to retrieve DHCP Profile %s, "
                          "native DHCP service is not supported",
                          az._native_dhcp_profile_uuid)

    def _init_native_metadata(self):
        # Verify every AZ's metadata proxy exists on the backend.
        try:
            for az in self.get_azs_list():
                self.nsxlib.native_md_proxy.get(az._native_md_proxy_uuid)
        except nsx_lib_exc.ManagerError:
            with excutils.save_and_reraise_exception():
                LOG.error("Unable to retrieve Metadata Proxy %s, "
                          "native metadata service is not supported",
                          az._native_md_proxy_uuid)

    def _setup_rpc(self):
        self.endpoints = [dhcp_rpc.DhcpRpcCallback(),
                          agents_db.AgentExtRpcCallback(),
                          metadata_rpc.MetadataRpcCallback()]

    def _setup_dhcp(self):
        """Initialize components to support DHCP."""
        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver
        )
        self.add_periodic_dhcp_agent_status_check()

    def _start_rpc_notifiers(self):
        """Initialize RPC notifiers for agents."""
        self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        )

    def start_rpc_listeners(self):
        # Idempotent RPC listener startup; repeated calls reuse the
        # existing connection instead of creating consumers again.
        if self.start_rpc_listeners_called:
            # If called more than once - we should not create it again
            return self.conn.consume_in_threads()

        self._setup_rpc()
        self.topic = topics.PLUGIN
        self.conn = n_rpc.create_connection()
        self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
        self.conn.create_consumer(topics.REPORTS,
                                  [agents_db.AgentExtRpcCallback()],
                                  fanout=False)
        self.start_rpc_listeners_called = True

        return self.conn.consume_in_threads()

    def _validate_provider_create(self, context, network_data, az,
                                  transparent_vlan):
        """Validate the provider-network attributes of a network request.

        Checks network type, physical network / transport zone and VLAN id
        against the backend, filling in AZ defaults where unset. Raises
        InvalidInput (or VlanIdInUse) on invalid combinations and returns a
        dict with is_provider_net / net_type / physical_net / vlan_id /
        switch_mode.
        """
        is_provider_net = any(
            validators.is_attr_set(network_data.get(f))
            for f in (pnet.NETWORK_TYPE,
                      pnet.PHYSICAL_NETWORK,
                      pnet.SEGMENTATION_ID))

        physical_net = network_data.get(pnet.PHYSICAL_NETWORK)
        if not validators.is_attr_set(physical_net):
            physical_net = None

        vlan_id = network_data.get(pnet.SEGMENTATION_ID)
        if not validators.is_attr_set(vlan_id):
            vlan_id = None

        if vlan_id and transparent_vlan:
            err_msg = (_("Segmentation ID cannot be set with transparent "
                         "vlan!"))
            raise n_exc.InvalidInput(error_message=err_msg)

        err_msg = None
        net_type = network_data.get(pnet.NETWORK_TYPE)
        nsxlib_tz = self.nsxlib.transport_zone
        tz_type = nsxlib_tz.TRANSPORT_TYPE_VLAN
        switch_mode = nsxlib_tz.HOST_SWITCH_MODE_STANDARD
        if validators.is_attr_set(net_type):
            if net_type == utils.NsxV3NetworkTypes.FLAT:
                if vlan_id is not None:
                    err_msg = (_("Segmentation ID cannot be specified with "
                                 "%s network type") %
                               utils.NsxV3NetworkTypes.FLAT)
                else:
                    if not transparent_vlan:
                        # Set VLAN id to 0 for flat networks
                        vlan_id = '0'
                    if physical_net is None:
                        physical_net = az._default_vlan_tz_uuid
            elif (net_type == utils.NsxV3NetworkTypes.VLAN and
                  not transparent_vlan):
                # Use default VLAN transport zone if physical network not given
                if physical_net is None:
                    physical_net = az._default_vlan_tz_uuid

                # Validate VLAN id
                if not vlan_id:
                    err_msg = (_('Segmentation ID must be specified with %s '
                                 'network type') %
                               utils.NsxV3NetworkTypes.VLAN)
                elif not n_utils.is_valid_vlan_tag(vlan_id):
                    err_msg = (_('Segmentation ID %(segmentation_id)s out of '
                                 'range (%(min_id)s through %(max_id)s)') %
                               {'segmentation_id': vlan_id,
                                'min_id': const.MIN_VLAN_TAG,
                                'max_id': const.MAX_VLAN_TAG})
                else:
                    # Verify VLAN id is not already allocated
                    bindings = (
                        nsx_db.get_network_bindings_by_vlanid_and_physical_net(
                            context.session, vlan_id, physical_net)
                    )
                    if bindings:
                        raise n_exc.VlanIdInUse(
                            vlan_id=vlan_id, physical_network=physical_net)
            elif (net_type == utils.NsxV3NetworkTypes.VLAN and
                  transparent_vlan):
                # Use default VLAN transport zone if physical network not given
                if physical_net is None:
                    physical_net = az._default_vlan_tz_uuid
            elif net_type == utils.NsxV3NetworkTypes.GENEVE:
                if vlan_id:
                    err_msg = (_("Segmentation ID cannot be specified with "
                                 "%s network type") %
                               utils.NsxV3NetworkTypes.GENEVE)
                tz_type = nsxlib_tz.TRANSPORT_TYPE_OVERLAY
            elif net_type == utils.NsxV3NetworkTypes.NSX_NETWORK:
                # Linking neutron networks to an existing NSX logical switch
                if physical_net is None:
                    err_msg = (_("Physical network must be specified with "
                                 "%s network type") % net_type)
                # Validate the logical switch existence
                try:
                    nsx_net = self.nsxlib.logical_switch.get(physical_net)
                    switch_mode = nsxlib_tz.get_host_switch_mode(
                        nsx_net['transport_zone_id'])
                except nsx_lib_exc.ResourceNotFound:
                    err_msg = (_('Logical switch %s does not exist') %
                               physical_net)
                # make sure no other neutron network is using it
                bindings = (
                    nsx_db.get_network_bindings_by_vlanid_and_physical_net(
                        context.elevated().session, 0, physical_net))
                if bindings:
                    err_msg = (_('Logical switch %s is already used by '
                                 'another network') % physical_net)
            else:
                err_msg = (_('%(net_type_param)s %(net_type_value)s not '
                             'supported') %
                           {'net_type_param': pnet.NETWORK_TYPE,
                            'net_type_value': net_type})
        elif is_provider_net:
            # FIXME: Ideally provider-network attributes should be checked
            # at the NSX backend. For now, the network_type is required,
            # so the plugin can do a quick check locally.
            err_msg = (_('%s is required for creating a provider network') %
                       pnet.NETWORK_TYPE)
        else:
            net_type = None

        if physical_net is None:
            # Default to transport type overlay
            physical_net = az._default_overlay_tz_uuid

        # validate the transport zone existence and type
        if (not err_msg and physical_net and
            net_type != utils.NsxV3NetworkTypes.NSX_NETWORK):
            if is_provider_net:
                try:
                    backend_type = nsxlib_tz.get_transport_type(
                        physical_net)
                except nsx_lib_exc.ResourceNotFound:
                    err_msg = (_('Transport zone %s does not exist') %
                               physical_net)
                else:
                    if backend_type != tz_type:
                        err_msg = (_('%(tz)s transport zone is required for '
                                     'creating a %(net)s provider network') %
                                   {'tz': tz_type, 'net': net_type})
            if not err_msg:
                switch_mode = nsxlib_tz.get_host_switch_mode(physical_net)

        if err_msg:
            raise n_exc.InvalidInput(error_message=err_msg)

        return {'is_provider_net': is_provider_net,
                'net_type': net_type,
                'physical_net': physical_net,
                'vlan_id': vlan_id,
                'switch_mode': switch_mode}

    def _get_edge_cluster(self, tier0_uuid):
        # Validate the tier0 router (cached in tier0_groups_dict) and
        # return its edge cluster uuid.
        self.nsxlib.router.validate_tier0(self.tier0_groups_dict, tier0_uuid)
        tier0_info = self.tier0_groups_dict[tier0_uuid]
        return tier0_info['edge_cluster_uuid']

    def _validate_external_net_create(self, net_data):
        # Validate an external network request and resolve its tier0
        # router; returns (is_external, net_type, tier0_uuid, vlan_id).
        if not validators.is_attr_set(net_data.get(pnet.PHYSICAL_NETWORK)):
            tier0_uuid = self._default_tier0_router
        else:
            tier0_uuid = net_data[pnet.PHYSICAL_NETWORK]
        if ((validators.is_attr_set(net_data.get(pnet.NETWORK_TYPE)) and
             net_data.get(pnet.NETWORK_TYPE) != utils.NetworkTypes.L3_EXT) or
            validators.is_attr_set(net_data.get(pnet.SEGMENTATION_ID))):
            msg = _("Invalid provider network configuration")
            raise n_exc.InvalidInput(error_message=msg)

        self.nsxlib.router.validate_tier0(self.tier0_groups_dict, tier0_uuid)

        return (True, utils.NetworkTypes.L3_EXT, tier0_uuid, 0)

    def _create_network_at_the_backend(self, context, net_data, az,
                                       transparent_vlan):
        # Validate provider attributes and create (or link) the backend
        # logical switch; returns provider info plus the NSX switch id.
        provider_data = self._validate_provider_create(context, net_data, az,
                                                       transparent_vlan)
        if (provider_data['switch_mode'] ==
            self.nsxlib.transport_zone.HOST_SWITCH_MODE_ENS):
            if not cfg.CONF.nsx_v3.ens_support:
                raise NotImplementedError(_("ENS support is disabled"))
            if net_data.get(psec.PORTSECURITY):
                # Port security is not supported on ENS switches
                raise nsx_exc.NsxENSPortSecurity()
            # set the default port security to False
            net_data[psec.PORTSECURITY] = False

        if (provider_data['is_provider_net'] and
            provider_data['net_type'] ==
            utils.NsxV3NetworkTypes.NSX_NETWORK):
            # Network already exists on the NSX backend
            nsx_id = provider_data['physical_net']
        else:
            # Create network on the backend
            neutron_net_id = net_data.get('id') or uuidutils.generate_uuid()
            # To ensure that the correct tag will be set
            net_data['id'] = neutron_net_id
            # update the network name to indicate the neutron id too.
            net_name = utils.get_name_and_uuid(net_data['name'] or 'network',
                                               neutron_net_id)
            tags = self.nsxlib.build_v3_tags_payload(
                net_data, resource_type='os-neutron-net-id',
                project_name=context.tenant_name)
            admin_state = net_data.get('admin_state_up', True)
            LOG.debug('create_network: %(net_name)s, %(physical_net)s, '
                      '%(tags)s, %(admin_state)s, %(vlan_id)s',
                      {'net_name': net_name,
                       'physical_net': provider_data['physical_net'],
                       'tags': tags,
                       'admin_state': admin_state,
                       'vlan_id': provider_data['vlan_id']})
            trunk_vlan_range = None
            if transparent_vlan:
                # all vlan tags are allowed for guest vlan
                trunk_vlan_range = [0, const.MAX_VLAN_TAG]
            nsx_result = self.nsxlib.logical_switch.create(
                net_name, provider_data['physical_net'], tags,
                admin_state=admin_state,
                vlan_id=provider_data['vlan_id'],
                description=net_data.get('description'),
                trunk_vlan_range=trunk_vlan_range)
            nsx_id = nsx_result['id']

        return (provider_data['is_provider_net'],
                provider_data['net_type'],
                provider_data['physical_net'],
                provider_data['vlan_id'],
                nsx_id)

    def _is_ddi_supported_on_network(self, context, network_id):
        # DDI (native DHCP/metadata) is available either when the backend
        # supports router interfaces on VLAN networks, or when the network
        # itself is an overlay.
        return (self.nsxlib.feature_supported(
            nsxlib_consts.FEATURE_VLAN_ROUTER_INTERFACE) or
            self._is_overlay_network(context, network_id))

    def _is_overlay_network(self, context, network_id):
        """Return True if this is an overlay network

        1. No binding ("normal" overlay networks will have no binding)
        2. Geneve network
        3. nsx network where the backend network is connected to an overlay TZ
        """
        bindings = nsx_db.get_network_bindings(context.session, network_id)
        # With NSX plugin, "normal" overlay networks will have no binding
        if not bindings:
            return True
        binding = bindings[0]
        if binding.binding_type == utils.NsxV3NetworkTypes.GENEVE:
            return True
        if binding.binding_type == utils.NsxV3NetworkTypes.NSX_NETWORK:
            # check the backend network
            # TODO(asarfaty): Keep TZ type in DB to avoid going to the backend
            ls = self.nsxlib.logical_switch.get(binding.phy_uuid)
            tz = ls.get('transport_zone_id')
            if tz:
                backend_type = self.nsxlib.transport_zone.get_transport_type(
                    tz)
                return (backend_type ==
                        self.nsxlib.transport_zone.TRANSPORT_TYPE_OVERLAY)
        return False

    def _extend_network_dict_provider(self, context, network, bindings=None):
        # Fill the provider-network attributes of a network dict from its
        # DB binding (if any).
        if 'id' not in network:
            return
        if not bindings:
            bindings = nsx_db.get_network_bindings(context.session,
                                                   network['id'])
        # With NSX plugin, "normal" overlay networks will have no binding
        if bindings:
            # Network came in through provider networks API
            network[pnet.NETWORK_TYPE] = bindings[0].binding_type
            network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid
            network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id

    def _assert_on_external_net_with_qos(self, net_data):
        # Prevent creating/update external network with QoS policy
        if validators.is_attr_set(net_data.get(qos_consts.QOS_POLICY_ID)):
            err_msg = _("Cannot configure QOS on external networks")
            raise n_exc.InvalidInput(error_message=err_msg)

    def get_subnets(self, context, filters=None, fields=None, sorts=None,
                    limit=None, marker=None, page_reverse=False):
        # Translate logical-switch-id filters (sent by Nova for metadata
        # handling) into neutron network-id filters before delegating.
        filters = filters or {}
        lswitch_ids = filters.pop(as_providers.ADV_SERVICE_PROVIDERS, [])
        if lswitch_ids:
            # This is a request from Nova for metadata processing.
            # Find the corresponding neutron network for each logical switch.
                                    created_net, net_data)

                if az_def.AZ_HINTS in net_data:
                    # Update the AZ hints in the neutron object
                    az_hints = az_validator.convert_az_list_to_string(
                        net_data[az_def.AZ_HINTS])
                    super(NsxV3Plugin, self).update_network(
                        context,
                        created_net['id'],
                        {'network': {az_def.AZ_HINTS: az_hints}})

                if is_provider_net:
                    # Save provider network fields, needed by get_network()
                    net_bindings = [nsx_db.add_network_binding(
                        context.session, created_net['id'],
                        net_type, physical_net, vlan_id)]
                    self._extend_network_dict_provider(context, created_net,
                                                       bindings=net_bindings)
                if is_backend_network:
                    # Add neutron-id <-> nsx-id mapping to the DB
                    # after the network creation is done
                    neutron_net_id = created_net['id']
                    nsx_db.add_neutron_nsx_network_mapping(
                        context.session,
                        neutron_net_id,
                        nsx_net_id)

                if nc_utils.is_extension_supported(self, 'vlan-transparent'):
                    super(NsxV3Plugin, self).update_network(context,
                        created_net['id'],
                        {'network': {'vlan_transparent': vlt}})

            rollback_network = True

            is_ddi_network = self._is_ddi_supported_on_network(
                context, created_net['id'])
            if (is_backend_network and
                cfg.CONF.nsx_v3.native_dhcp_metadata and
                is_ddi_network):
                # Enable native metadata proxy for this network.
                tags = self.nsxlib.build_v3_tags_payload(
                    created_net, resource_type='os-neutron-net-id',
                    project_name=context.tenant_name)
                name = utils.get_name_and_uuid('%s-%s' % (
                    'mdproxy', created_net['name'] or 'network'),
                    created_net['id'])
                md_port = self.nsxlib.logical_port.create(
                    nsx_net_id, az._native_md_proxy_uuid,
                    tags=tags, name=name,
                    attachment_type=nsxlib_consts.ATTACHMENT_MDPROXY)
                LOG.debug("Created MD-Proxy logical port %(port)s "
                          "for network %(network)s",
                          {'port': md_port['id'],
                           'network': created_net['id']})
        except Exception:
            with excutils.save_and_reraise_exception():
                # Undo creation on the backend
                LOG.exception('Failed to create network')
                if (nsx_net_id and
                    net_type != utils.NsxV3NetworkTypes.NSX_NETWORK):
                    self.nsxlib.logical_switch.delete(nsx_net_id)
                if (cfg.CONF.nsx_v3.native_dhcp_metadata and
                    is_backend_network and is_ddi_network):
                    # Delete the mdproxy port manually
                    self._delete_network_nsx_dhcp_port(created_net['id'])
                if rollback_network:
                    super(NsxV3Plugin, self).delete_network(
                        context, created_net['id'])

        # this extra lookup is necessary to get the
        # latest db model for the extension functions
        net_model = self._get_network(context, created_net['id'])
        resource_extend.apply_funcs('networks', created_net, net_model)

        # Update the QoS policy (will affect only future compute ports)
        qos_com_utils.set_qos_policy_on_new_net(
            context, net_data, created_net)

        return created_net

    def _has_active_port(self, context, network_id):
        # True when at least one port on the network is NOT one of the
        # owners that delete_network() auto-deletes (e.g. a tenant port).
        ports_in_use = context.session.query(models_v2.Port).filter_by(
            network_id=network_id).all()
        return not all([p.device_owner in
                        db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS
                        for p in ports_in_use]) if ports_in_use else False

    def _retry_delete_network(self, context, network_id):
        """This method attempts to retry the delete on a network if there are
        AUTO_DELETE_PORT_OWNERS left. This is to avoid a race condition
        between delete_network and the dhcp creating a port on the network.
        """
        first_try = True
        while True:
            try:
                with db_api.context_manager.writer.using(context):
                    self._process_l3_delete(context, network_id)
                    return super(NsxV3Plugin, self).delete_network(
                        context, network_id)
            except n_exc.NetworkInUse:
                # There is a race condition in delete_network() that we need
                # to work around here. delete_network() issues a query to
                # automatically delete DHCP ports and then checks to see if
                # any ports exist on the network. If a network is created and
                # deleted quickly, such as when running tempest, the DHCP
                # agent may be creating its port for the network around the
                # same time that the network is deleted. This can result in
                # the DHCP port getting created in between these two queries
                # in delete_network(). To work around that, we'll call
                # delete_network() a second time if we get a NetworkInUse
                # exception but the only port(s) that exist are ones that
                # delete_network() is supposed to automatically delete.
                if not first_try:
                    # We tried once to work around the known race condition,
                    # but we still got the exception, so something else is
                    # wrong that we can't recover from.
                    raise
                first_try = False
                if self._has_active_port(context, network_id):
                    # There is a port on the network that is not going to be
                    # automatically deleted (such as a tenant created port),
                    # so we have nothing else to do but raise the exception.
                    raise

    def _delete_network_nsx_dhcp_port(self, network_id):
        # Remove the backend logical port tagged with this neutron network
        # (the native metadata-proxy/DHCP port), if one exists.
        port_id = self.nsxlib.get_id_by_resource_and_tag(
            self.nsxlib.logical_port.resource_type,
            'os-neutron-net-id', network_id)
        if port_id:
            self.nsxlib.logical_port.delete(port_id)

    def delete_network(self, context, network_id):
        # Delete the neutron network and its backend logical switch;
        # native DHCP is disabled first when no non-auto-delete ports
        # remain on the network.
        if cfg.CONF.nsx_v3.native_dhcp_metadata:
            lock = 'nsxv3_network_' + network_id
            with locking.LockManager.get_lock(lock):
                # Disable native DHCP if there is no other existing port
                # besides DHCP port.
                if not self._has_active_port(context, network_id):
                    self._disable_native_dhcp(context, network_id)

        nsx_net_id = self._get_network_nsx_id(context, network_id)
        is_nsx_net = self._network_is_nsx_net(context, network_id)
        is_ddi_network = self._is_ddi_supported_on_network(context,
                                                           network_id)
        # First call DB operation for delete network as it will perform
        # checks on active ports
        self._retry_delete_network(context, network_id)
        if (not self._network_is_external(context, network_id) and
            not is_nsx_net):
            # TODO(salv-orlando): Handle backend failure, possibly without
            # requiring us to un-delete the DB object. For instance, ignore
            # failures occurring if logical switch is not found
            self.nsxlib.logical_switch.delete(nsx_net_id)
        else:
            if (cfg.CONF.nsx_v3.native_dhcp_metadata and is_nsx_net and
                is_ddi_network):
                # Delete the mdproxy port manually
                self._delete_network_nsx_dhcp_port(network_id)
        # TODO(berlin): delete subnets public announce on the network

    def _get_network_nsx_id(self, context, neutron_id):
        # get the nsx switch id from the DB mapping
        mappings = nsx_db.get_nsx_switch_ids(context.session, neutron_id)
        if not mappings or len(mappings) == 0:
            LOG.debug("Unable to find NSX mappings for neutron "
                      "network %s.", neutron_id)
            # fallback in case we didn't find the id in the db mapping
            # This should not happen, but added here in case the network was
            # created before this code was added.
return neutron_id else: return mappings[0] def update_network(self, context, id, network): original_net = super(NsxV3Plugin, self).get_network(context, id) net_data = network['network'] # Neutron does not support changing provider network values providernet._raise_if_updates_provider_attributes(net_data) extern_net = self._network_is_external(context, id) is_nsx_net = self._network_is_nsx_net(context, id) if extern_net: self._assert_on_external_net_with_qos(net_data) updated_net = super(NsxV3Plugin, self).update_network(context, id, network) self._extension_manager.process_update_network(context, net_data, updated_net) if psec.PORTSECURITY in net_data: # do not allow to enable port security on ENS networks if (net_data[psec.PORTSECURITY] and not original_net[psec.PORTSECURITY] and self._is_ens_tz_net(context, id)): raise nsx_exc.NsxENSPortSecurity() self._process_network_port_security_update( context, net_data, updated_net) self._process_l3_update(context, updated_net, network['network']) self._extend_network_dict_provider(context, updated_net) if (not extern_net and not is_nsx_net and ('name' in net_data or 'admin_state_up' in net_data or 'description' in net_data)): try: # get the nsx switch id from the DB mapping nsx_id = self._get_network_nsx_id(context, id) net_name = net_data.get('name', original_net.get('name')) or 'network' self.nsxlib.logical_switch.update( nsx_id, name=utils.get_name_and_uuid(net_name, id), admin_state=net_data.get('admin_state_up'), description=net_data.get('description')) # Backend does not update the admin state of the ports on # the switch when the switch's admin state changes. Do not # update the admin state of the ports in neutron either. 
except nsx_lib_exc.ManagerError: LOG.exception("Unable to update NSX backend, rolling " "back changes on neutron") with excutils.save_and_reraise_exception(): super(NsxV3Plugin, self).update_network( context, id, {'network': original_net}) if qos_consts.QOS_POLICY_ID in net_data: # attach the policy to the network in neutron DB #(will affect only future compute ports) qos_com_utils.update_network_policy_binding( context, id, net_data[qos_consts.QOS_POLICY_ID]) return updated_net def _has_no_dhcp_enabled_subnet(self, context, network): # Check if there is no DHCP-enabled subnet in the network. for subnet in network.subnets: if subnet.enable_dhcp: return False return True def _has_single_dhcp_enabled_subnet(self, context, network): # Check if there is only one DHCP-enabled subnet in the network. count = 0 for subnet in network.subnets: if subnet.enable_dhcp: count += 1 if count > 1: return False return True if count == 1 else False def _enable_native_dhcp(self, context, network, subnet): # Enable native DHCP service on the backend for this network. # First create a Neutron DHCP port and use its assigned IP # address as the DHCP server address in an API call to create a # LogicalDhcpServer on the backend. Then create the corresponding # logical port for the Neutron port with DHCP attachment as the # LogicalDhcpServer UUID. # Delete obsolete settings if exist. This could happen when a # previous failed transaction was rolled back. But the backend # entries are still there. self._disable_native_dhcp(context, network['id']) # Get existing ports on subnet. 
existing_ports = super(NsxV3Plugin, self).get_ports( context, filters={'network_id': [network['id']], 'fixed_ips': {'subnet_id': [subnet['id']]}}) az = self.get_network_az(network) port_data = { "name": "", "admin_state_up": True, "device_id": az._native_dhcp_profile_uuid, "device_owner": const.DEVICE_OWNER_DHCP, "network_id": network['id'], "tenant_id": network["tenant_id"], "mac_address": const.ATTR_NOT_SPECIFIED, "fixed_ips": [{"subnet_id": subnet['id']}], psec.PORTSECURITY: False } # Create the DHCP port (on neutron only) and update its port security port = {'port': port_data} neutron_port = super(NsxV3Plugin, self).create_port(context, port) self._create_port_preprocess_security(context, port, port_data, neutron_port) net_tags = self.nsxlib.build_v3_tags_payload( network, resource_type='os-neutron-net-id', project_name=context.tenant_name) server_data = self.nsxlib.native_dhcp.build_server_config( network, subnet, neutron_port, net_tags, default_dns_nameservers=az.nameservers, default_dns_domain=az.dns_domain) server_data['dhcp_profile_id'] = az._native_dhcp_profile_uuid nsx_net_id = self._get_network_nsx_id(context, network['id']) port_tags = self.nsxlib.build_v3_tags_payload( neutron_port, resource_type='os-neutron-dport-id', project_name=context.tenant_name) dhcp_server = None dhcp_port_profiles = [] if not self._is_ens_tz_net(context, network['id']): dhcp_port_profiles.append(self._dhcp_profile) try: dhcp_server = self.nsxlib.dhcp_server.create(**server_data) LOG.debug("Created logical DHCP server %(server)s for network " "%(network)s", {'server': dhcp_server['id'], 'network': network['id']}) name = self._get_port_name(context, port_data) nsx_port = self.nsxlib.logical_port.create( nsx_net_id, dhcp_server['id'], tags=port_tags, name=name, attachment_type=nsxlib_consts.ATTACHMENT_DHCP, switch_profile_ids=dhcp_port_profiles) LOG.debug("Created DHCP logical port %(port)s for " "network %(network)s", {'port': nsx_port['id'], 'network': network['id']}) except 
nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.error("Unable to create logical DHCP server for " "network %s", network['id']) if dhcp_server: self.nsxlib.dhcp_server.delete(dhcp_server['id']) super(NsxV3Plugin, self).delete_port( context, neutron_port['id']) try: # Add neutron_port_id -> nsx_port_id mapping to the DB. nsx_db.add_neutron_nsx_port_mapping( context.session, neutron_port['id'], nsx_net_id, nsx_port['id']) # Add neutron_net_id -> dhcp_service_id mapping to the DB. nsx_db.add_neutron_nsx_service_binding( context.session, network['id'], neutron_port['id'], nsxlib_consts.SERVICE_DHCP, dhcp_server['id']) except (db_exc.DBError, sql_exc.TimeoutError): with excutils.save_and_reraise_exception(): LOG.error("Failed to create mapping for DHCP port %s," "deleting port and logical DHCP server", neutron_port['id']) self.nsxlib.dhcp_server.delete(dhcp_server['id']) self._cleanup_port(context, neutron_port['id'], nsx_port['id']) # Configure existing ports to work with the new DHCP server try: for port_data in existing_ports: self._add_dhcp_binding(context, port_data) except Exception: LOG.error('Unable to create DHCP bindings for existing ports ' 'on subnet %s', subnet['id']) def _disable_native_dhcp(self, context, network_id): # Disable native DHCP service on the backend for this network. # First delete the DHCP port in this network. Then delete the # corresponding LogicalDhcpServer for this network. dhcp_service = nsx_db.get_nsx_service_binding( context.session, network_id, nsxlib_consts.SERVICE_DHCP) if not dhcp_service: return if dhcp_service['port_id']: try: self.delete_port(context, dhcp_service['port_id'], force_delete_dhcp=True) except Exception: # This could happen when the port has been manually deleted. 
LOG.error("Failed to delete DHCP port %(port)s for " "network %(network)s", {'port': dhcp_service['port_id'], 'network': network_id}) else: LOG.error("DHCP port is not configured for network %s", network_id) try: self.nsxlib.dhcp_server.delete(dhcp_service['nsx_service_id']) LOG.debug("Deleted logical DHCP server %(server)s for network " "%(network)s", {'server': dhcp_service['nsx_service_id'], 'network': network_id}) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.error("Unable to delete logical DHCP server %(server)s " "for network %(network)s", {'server': dhcp_service['nsx_service_id'], 'network': network_id}) try: # Delete neutron_id -> dhcp_service_id mapping from the DB. nsx_db.delete_neutron_nsx_service_binding( context.session, network_id, nsxlib_consts.SERVICE_DHCP) # Delete all DHCP bindings under this DHCP server from the DB. nsx_db.delete_neutron_nsx_dhcp_bindings_by_service_id( context.session, dhcp_service['nsx_service_id']) except db_exc.DBError: with excutils.save_and_reraise_exception(): LOG.error("Unable to delete DHCP server mapping for " "network %s", network_id) def _validate_address_space(self, subnet): cidr = subnet.get('cidr') if (not validators.is_attr_set(cidr) or netaddr.IPNetwork(cidr).version != 4): return # Check if subnet overlaps with shared address space. # This is checked on the backend when attaching subnet to a router. if netaddr.IPSet([cidr]) & netaddr.IPSet(['100.64.0.0/10']): msg = _("Subnet overlaps with shared address space 100.64.0.0/10") LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _create_bulk_with_callback(self, resource, context, request_items, post_create_func=None, rollback_func=None): # This is a copy of the _create_bulk() in db_base_plugin_v2.py, # but extended with user-provided callback functions. 
objects = [] collection = "%ss" % resource items = request_items[collection] try: with db_api.context_manager.writer.using(context): for item in items: obj_creator = getattr(self, 'create_%s' % resource) obj = obj_creator(context, item) objects.append(obj) if post_create_func: # The user-provided post_create function is called # after a new object is created. post_create_func(obj) except Exception: if rollback_func: # The user-provided rollback function is called when an # exception occurred. for obj in objects: rollback_func(obj) # Note that the session.rollback() function is called here. # session.rollback() will invoke transaction.rollback() on # the transaction this session maintains. The latter will # deactive the transaction and clear the session's cache. # # But depending on where the exception occurred, # transaction.rollback() may have already been called # internally before reaching here. # # For example, if the exception happened under a # "with session.begin(subtransactions=True):" statement # anywhere in the middle of processing obj_creator(), # transaction.__exit__() will invoke transaction.rollback(). # Thus when the exception reaches here, the session's cache # is already empty. 
context.session.rollback() with excutils.save_and_reraise_exception(): LOG.error("An exception occurred while creating " "the %(resource)s:%(item)s", {'resource': resource, 'item': item}) return objects def _post_create_subnet(self, context, subnet): LOG.debug("Collect native DHCP entries for network %s", subnet['network_id']) dhcp_service = nsx_db.get_nsx_service_binding( context.session, subnet['network_id'], nsxlib_consts.SERVICE_DHCP) if dhcp_service: _net_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, dhcp_service['port_id']) return {'nsx_port_id': nsx_port_id, 'nsx_service_id': dhcp_service['nsx_service_id']} def _rollback_subnet(self, subnet, dhcp_info): LOG.debug("Rollback native DHCP entries for network %s", subnet['network_id']) if dhcp_info: try: self.nsxlib.logical_port.delete(dhcp_info['nsx_port_id']) except Exception as e: LOG.error("Failed to delete logical port %(id)s " "during rollback. Exception: %(e)s", {'id': dhcp_info['nsx_port_id'], 'e': e}) try: self.nsxlib.dhcp_server.delete(dhcp_info['nsx_service_id']) except Exception as e: LOG.error("Failed to delete logical DHCP server %(id)s " "during rollback. Exception: %(e)s", {'id': dhcp_info['nsx_service_id'], 'e': e}) def create_subnet_bulk(self, context, subnets): # Maintain a local cache here because when the rollback function # is called, the cache in the session may have already been cleared. 
_subnet_dhcp_info = {} def _post_create(subnet): if subnet['enable_dhcp']: _subnet_dhcp_info[subnet['id']] = self._post_create_subnet( context, subnet) def _rollback(subnet): if subnet['enable_dhcp'] and subnet['id'] in _subnet_dhcp_info: self._rollback_subnet(subnet, _subnet_dhcp_info[subnet['id']]) del _subnet_dhcp_info[subnet['id']] if cfg.CONF.nsx_v3.native_dhcp_metadata: return self._create_bulk_with_callback('subnet', context, subnets, _post_create, _rollback) else: return self._create_bulk('subnet', context, subnets) def create_subnet(self, context, subnet): self._validate_address_space(subnet['subnet']) # TODO(berlin): public external subnet announcement if (cfg.CONF.nsx_v3.native_dhcp_metadata and subnet['subnet'].get('enable_dhcp', False)): self._validate_external_subnet(context, subnet['subnet']['network_id']) lock = 'nsxv3_network_' + subnet['subnet']['network_id'] with locking.LockManager.get_lock(lock): # Check if it is on an overlay network and is the first # DHCP-enabled subnet to create. 
if self._is_ddi_supported_on_network( context, subnet['subnet']['network_id']): network = self._get_network( context, subnet['subnet']['network_id']) if self._has_no_dhcp_enabled_subnet(context, network): created_subnet = super( NsxV3Plugin, self).create_subnet(context, subnet) self._extension_manager.process_create_subnet(context, subnet['subnet'], created_subnet) dhcp_relay = self.get_network_az_by_net_id( context, subnet['subnet']['network_id']).dhcp_relay_service if not dhcp_relay: self._enable_native_dhcp(context, network, created_subnet) msg = None else: msg = (_("Can not create more than one DHCP-enabled " "subnet in network %s") % subnet['subnet']['network_id']) else: msg = _("Native DHCP is not supported for non-overlay " "network %s") % subnet['subnet']['network_id'] if msg: LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) else: created_subnet = super(NsxV3Plugin, self).create_subnet( context, subnet) return created_subnet def delete_subnet(self, context, subnet_id): # TODO(berlin): cancel public external subnet announcement if cfg.CONF.nsx_v3.native_dhcp_metadata: # Ensure that subnet is not deleted if attached to router. self._subnet_check_ip_allocations_internal_router_ports( context, subnet_id) subnet = self.get_subnet(context, subnet_id) if subnet['enable_dhcp']: lock = 'nsxv3_network_' + subnet['network_id'] with locking.LockManager.get_lock(lock): # Check if it is the last DHCP-enabled subnet to delete. network = self._get_network(context, subnet['network_id']) if self._has_single_dhcp_enabled_subnet(context, network): try: self._disable_native_dhcp(context, network['id']) except Exception as e: LOG.error("Failed to disable native DHCP for" "network %(id)s. 
Exception: %(e)s", {'id': network['id'], 'e': e}) super(NsxV3Plugin, self).delete_subnet( context, subnet_id) return super(NsxV3Plugin, self).delete_subnet(context, subnet_id) def update_subnet(self, context, subnet_id, subnet): updated_subnet = None if cfg.CONF.nsx_v3.native_dhcp_metadata: orig_subnet = self.get_subnet(context, subnet_id) enable_dhcp = subnet['subnet'].get('enable_dhcp') if (enable_dhcp is not None and enable_dhcp != orig_subnet['enable_dhcp']): lock = 'nsxv3_network_' + orig_subnet['network_id'] with locking.LockManager.get_lock(lock): network = self._get_network( context, orig_subnet['network_id']) if enable_dhcp: if self._is_ddi_supported_on_network( context, orig_subnet['network_id']): if self._has_no_dhcp_enabled_subnet( context, network): updated_subnet = super( NsxV3Plugin, self).update_subnet( context, subnet_id, subnet) self._extension_manager.process_update_subnet( context, subnet['subnet'], updated_subnet) self._enable_native_dhcp(context, network, updated_subnet) msg = None else: msg = (_("Multiple DHCP-enabled subnets is " "not allowed in network %s") % orig_subnet['network_id']) else: msg = (_("Native DHCP is not supported for " "non-overlay network %s") % orig_subnet['network_id']) if msg: LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) elif self._has_single_dhcp_enabled_subnet(context, network): self._disable_native_dhcp(context, network['id']) updated_subnet = super( NsxV3Plugin, self).update_subnet( context, subnet_id, subnet) self._extension_manager.process_update_subnet( context, subnet['subnet'], updated_subnet) if not updated_subnet: updated_subnet = super(NsxV3Plugin, self).update_subnet( context, subnet_id, subnet) self._extension_manager.process_update_subnet( context, subnet['subnet'], updated_subnet) # Check if needs to update logical DHCP server for native DHCP. 
if (cfg.CONF.nsx_v3.native_dhcp_metadata and updated_subnet['enable_dhcp']): kwargs = {} for key in ('dns_nameservers', 'gateway_ip', 'host_routes'): if key in subnet['subnet']: value = subnet['subnet'][key] if value != orig_subnet[key]: kwargs[key] = value if key != 'dns_nameservers': kwargs['options'] = None if 'options' in kwargs: sr, gw_ip = self.nsxlib.native_dhcp.build_static_routes( updated_subnet.get('gateway_ip'), updated_subnet.get('cidr'), updated_subnet.get('host_routes', [])) kwargs['options'] = {'option121': {'static_routes': sr}} kwargs.pop('host_routes', None) if (gw_ip is not None and 'gateway_ip' not in kwargs and gw_ip != updated_subnet['gateway_ip']): kwargs['gateway_ip'] = gw_ip if kwargs: dhcp_service = nsx_db.get_nsx_service_binding( context.session, orig_subnet['network_id'], nsxlib_consts.SERVICE_DHCP) if dhcp_service: try: self.nsxlib.dhcp_server.update( dhcp_service['nsx_service_id'], **kwargs) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.error( "Unable to update logical DHCP server " "%(server)s for network %(network)s", {'server': dhcp_service['nsx_service_id'], 'network': orig_subnet['network_id']}) if 'options' in kwargs: # Need to update the static binding of every VM in # this logical DHCP server. bindings = nsx_db.get_nsx_dhcp_bindings_by_service( context.session, dhcp_service['nsx_service_id']) for binding in bindings: port = self._get_port(context, binding['port_id']) dhcp_opts = port.get(ext_edo.EXTRADHCPOPTS) self._update_dhcp_binding_on_server( context, binding, port['mac_address'], binding['ip_address'], port['network_id'], gateway_ip=kwargs.get('gateway_ip', False), dhcp_opts=dhcp_opts, options=kwargs.get('options'), subnet=updated_subnet) if (cfg.CONF.nsx_v3.metadata_on_demand and not cfg.CONF.nsx_v3.native_dhcp_metadata): # If enable_dhcp is changed on a subnet attached to a router, # update internal metadata network accordingly. 
if 'enable_dhcp' in subnet['subnet']: port_filters = {'device_owner': const.ROUTER_INTERFACE_OWNERS, 'fixed_ips': {'subnet_id': [subnet_id]}} ports = self.get_ports(context, filters=port_filters) for port in ports: nsx_rpc.handle_router_metadata_access( self, context, port['device_id'], interface=not updated_subnet['enable_dhcp']) return updated_subnet def _build_address_bindings(self, port): address_bindings = [] for fixed_ip in port['fixed_ips']: # NOTE(arosen): nsx-v3 doesn't seem to handle ipv6 addresses # currently so for now we remove them here and do not pass # them to the backend which would raise an error. if netaddr.IPNetwork(fixed_ip['ip_address']).version == 6: continue address_bindings.append(nsx_resources.PacketAddressClassifier( fixed_ip['ip_address'], port['mac_address'], None)) for pair in port.get(addr_apidef.ADDRESS_PAIRS): address_bindings.append(nsx_resources.PacketAddressClassifier( pair['ip_address'], pair['mac_address'], None)) return address_bindings def _extend_get_network_dict_provider(self, context, network): self._extend_network_dict_provider(context, network) network[qos_consts.QOS_POLICY_ID] = (qos_com_utils. 
get_network_policy_id(context, network['id'])) def get_network(self, context, id, fields=None): with db_api.context_manager.reader.using(context): # Get network from Neutron database network = self._get_network(context, id) # Don't do field selection here otherwise we won't be able to add # provider networks fields net = self._make_network_dict(network, context=context) self._extend_get_network_dict_provider(context, net) return db_utils.resource_fields(net, fields) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Get networks from Neutron database filters = filters or {} with db_api.context_manager.reader.using(context): networks = ( super(NsxV3Plugin, self).get_networks( context, filters, fields, sorts, limit, marker, page_reverse)) # Add provider network fields for net in networks: self._extend_get_network_dict_provider(context, net) return (networks if not fields else [db_utils.resource_fields(network, fields) for network in networks]) def _get_port_name(self, context, port_data): device_owner = port_data.get('device_owner') device_id = port_data.get('device_id') if device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF and device_id: router = self._get_router(context, device_id) name = utils.get_name_and_uuid( router['name'] or 'router', port_data['id'], tag='port') elif device_owner == const.DEVICE_OWNER_DHCP: network = self.get_network(context, port_data['network_id']) name = utils.get_name_and_uuid('%s-%s' % ( 'dhcp', network['name'] or 'network'), network['id']) elif device_owner.startswith(const.DEVICE_OWNER_COMPUTE_PREFIX): name = utils.get_name_and_uuid( port_data['name'] or 'instance-port', port_data['id']) else: name = port_data['name'] return name def _get_qos_profile_id(self, context, policy_id): switch_profile_id = nsx_db.get_switch_profile_by_qos_policy( context.session, policy_id) nsxlib_qos = self.nsxlib.qos_switching_profile qos_profile = nsxlib_qos.get(switch_profile_id) if 
qos_profile: profile_ids = nsxlib_qos.build_switch_profile_ids( self.nsxlib.switching_profile, qos_profile) if profile_ids and len(profile_ids) > 0: # We have only 1 QoS profile, so this array is of size 1 return profile_ids[0] # Didn't find it err_msg = _("Could not find QoS switching profile for policy " "%s") % policy_id LOG.error(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def _is_excluded_port(self, device_owner, port_security): if device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF: return False if device_owner == const.DEVICE_OWNER_DHCP: if not cfg.CONF.nsx_v3.native_dhcp_metadata: return True elif not port_security: return True return False def _create_port_at_the_backend(self, context, port_data, l2gw_port_check, psec_is_on): device_owner = port_data.get('device_owner') device_id = port_data.get('device_id') if device_owner == const.DEVICE_OWNER_DHCP: resource_type = 'os-neutron-dport-id' elif device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF: resource_type = 'os-neutron-rport-id' else: resource_type = 'os-neutron-port-id' tags = self.nsxlib.build_v3_tags_payload( port_data, resource_type=resource_type, project_name=context.tenant_name) resource_type = self._get_resource_type_for_device_id( device_owner, device_id) if resource_type: tags = nsxlib_utils.add_v3_tag(tags, resource_type, device_id) add_to_exclude_list = False if self._is_excluded_port(device_owner, psec_is_on): if self.nsxlib.feature_supported( nsxlib_consts.FEATURE_EXCLUDE_PORT_BY_TAG): tags.append({'scope': security.PORT_SG_SCOPE, 'tag': nsxlib_consts.EXCLUDE_PORT}) else: add_to_exclude_list = True elif self.nsxlib.feature_supported( nsxlib_consts.FEATURE_DYNAMIC_CRITERIA): # If port has no security-groups then we don't need to add any # security criteria tag. 
if port_data[ext_sg.SECURITYGROUPS]: tags += self.nsxlib.ns_group.get_lport_tags( port_data[ext_sg.SECURITYGROUPS] + port_data[provider_sg.PROVIDER_SECURITYGROUPS]) # Add port to the default list if (device_owner != l3_db.DEVICE_OWNER_ROUTER_INTF and device_owner != const.DEVICE_OWNER_DHCP): tags.append({'scope': security.PORT_SG_SCOPE, 'tag': NSX_V3_DEFAULT_SECTION}) address_bindings = (self._build_address_bindings(port_data) if psec_is_on else []) if not device_owner: # no attachment attachment_type = None vif_uuid = None elif l2gw_port_check: # Change the attachment type for L2 gateway owned ports. # NSX backend requires the vif id be set to bridge endpoint id # for ports plugged into a Bridge Endpoint. # Also set port security to False, since L2GW port does not have # an IP address. vif_uuid = device_id attachment_type = device_owner psec_is_on = False elif device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF: # no attachment change attachment_type = False vif_uuid = False else: # default attachment attachment_type = nsxlib_consts.ATTACHMENT_VIF vif_uuid = port_data['id'] profiles = [] # Add availability zone profiles first (so that specific profiles will # override them) port_az = self.get_network_az_by_net_id(context, port_data['network_id']) if port_az.switching_profiles_objs: profiles.extend(port_az.switching_profiles_objs) mac_learning_profile_set = False if psec_is_on: address_pairs = port_data.get(addr_apidef.ADDRESS_PAIRS) if validators.is_attr_set(address_pairs) and address_pairs: mac_learning_profile_set = True profiles.append(self._get_port_security_profile_id()) if device_owner == const.DEVICE_OWNER_DHCP: if not self._is_ens_tz_port(context, port_data): profiles.append(self._dhcp_profile) # Add QoS switching profile, if exists qos_policy_id = None if validators.is_attr_set(port_data.get(qos_consts.QOS_POLICY_ID)): qos_policy_id = port_data[qos_consts.QOS_POLICY_ID] elif device_owner.startswith(const.DEVICE_OWNER_COMPUTE_PREFIX): # check if the network of 
this port has a policy qos_policy_id = qos_com_utils.get_network_policy_id( context, port_data['network_id']) if qos_policy_id: qos_profile_id = self._get_qos_profile_id(context, qos_policy_id) profiles.append(qos_profile_id) # Add mac_learning profile if it exists and is configured if (self._mac_learning_profile and (mac_learning_profile_set or (validators.is_attr_set(port_data.get(mac_ext.MAC_LEARNING)) and port_data.get(mac_ext.MAC_LEARNING) is True))): profiles.append(self._mac_learning_profile) profiles.append(self._no_switch_security) name = self._get_port_name(context, port_data) nsx_net_id = port_data[pbin.VIF_DETAILS]['nsx-logical-switch-id'] try: result = self.nsxlib.logical_port.create( nsx_net_id, vif_uuid, tags=tags, name=name, admin_state=port_data['admin_state_up'], address_bindings=address_bindings, attachment_type=attachment_type, switch_profile_ids=profiles, description=port_data.get('description')) except nsx_lib_exc.ManagerError as inst: # we may fail if the QoS is not supported for this port # (for example - transport zone with KVM) LOG.exception("Unable to create port on the backend: %s", inst) msg = _("Unable to create port on the backend") raise nsx_exc.NsxPluginException(err_msg=msg) # Attach the policy to the port in the neutron DB if qos_policy_id: qos_com_utils.update_port_policy_binding(context, port_data['id'], qos_policy_id) # Add the port to the exclude list if necessary - this is if # the version is below 2.0.0 if add_to_exclude_list: self.nsxlib.firewall_section.add_member_to_fw_exclude_list( result['id'], nsxlib_consts.TARGET_TYPE_LOGICAL_PORT) return result def _validate_address_pairs(self, address_pairs): for pair in address_pairs: ip = pair.get('ip_address') if not utils.is_ipv4_ip_address(ip): raise nsx_exc.InvalidIPAddress(ip_address=ip) def _provider_sgs_specified(self, port_data): # checks if security groups were updated adding/modifying # security groups, port security is set and port has ip provider_sgs_specified = 
(validators.is_attr_set( port_data.get(provider_sg.PROVIDER_SECURITYGROUPS)) and port_data.get(provider_sg.PROVIDER_SECURITYGROUPS) != []) return provider_sgs_specified def _is_ens_tz_net(self, context, net_id): #Check the host-switch-mode of the TZ connected to network mappings = nsx_db.get_nsx_switch_ids(context.session, net_id) if mappings: nsx_net_id = nsx_net_id = mappings[0] if nsx_net_id: nsx_net = self.nsxlib.logical_switch.get(nsx_net_id) if nsx_net and nsx_net.get('transport_zone_id'): # Check the mode of this TZ mode = self.nsxlib.transport_zone.get_host_switch_mode( nsx_net['transport_zone_id']) return (mode == self.nsxlib.transport_zone.HOST_SWITCH_MODE_ENS) return False def _is_ens_tz_port(self, context, port_data): # Check the host-switch-mode of the TZ connected to the ports network return self._is_ens_tz_net(context, port_data['network_id']) def _create_port_preprocess_security( self, context, port, port_data, neutron_db): (port_security, has_ip) = self._determine_port_security_and_has_ip( context, port_data) port_data[psec.PORTSECURITY] = port_security # No port security is allowed if the port belongs to an ENS TZ if port_security and self._is_ens_tz_port(context, port_data): raise nsx_exc.NsxENSPortSecurity() self._process_port_port_security_create( context, port_data, neutron_db) # allowed address pair checks address_pairs = port_data.get(addr_apidef.ADDRESS_PAIRS) if validators.is_attr_set(address_pairs): if not port_security: raise addr_exc.AddressPairAndPortSecurityRequired() else: self._validate_address_pairs(address_pairs) self._process_create_allowed_address_pairs( context, neutron_db, address_pairs) else: # remove ATTR_NOT_SPECIFIED port_data[addr_apidef.ADDRESS_PAIRS] = [] if port_security and has_ip: self._ensure_default_security_group_on_port(context, port) (sgids, psgids) = self._get_port_security_groups_lists( context, port) elif (self._check_update_has_security_groups({'port': port_data}) or self._provider_sgs_specified(port_data) 
or self._get_provider_security_groups_on_port(context, port)): LOG.error("Port has conflicting port security status and " "security groups") raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() else: sgids = psgids = [] port_data[ext_sg.SECURITYGROUPS] = ( self._get_security_groups_on_port(context, port)) return port_security, has_ip, sgids, psgids def _assert_on_external_net_with_compute(self, port_data): # Prevent creating port with device owner prefix 'compute' # on external networks. device_owner = port_data.get('device_owner') if (device_owner is not None and device_owner.startswith(const.DEVICE_OWNER_COMPUTE_PREFIX)): err_msg = _("Unable to update/create a port with an external " "network") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def _assert_on_dhcp_relay_without_router(self, context, port_data, original_port=None): # Prevent creating/updating port with device owner prefix 'compute' # on a subnet with dhcp relay but no router. if not original_port: original_port = port_data device_owner = port_data.get('device_owner') if (device_owner is None or not device_owner.startswith(const.DEVICE_OWNER_COMPUTE_PREFIX)): # not a compute port return if not self.get_network_az_by_net_id( context, original_port['network_id']).dhcp_relay_service: # No dhcp relay for the net of this port return # get the subnet id from the fixed ips of the port if 'fixed_ips' in port_data and port_data['fixed_ips']: subnet_id = port_data['fixed_ips'][0]['subnet_id'] elif 'fixed_ips' in original_port and original_port['fixed_ips']: subnet_id = original_port['fixed_ips'][0]['subnet_id'] else: return # check only dhcp enabled subnets subnet = self.get_subnet(context.elevated(), subnet_id) if not subnet['enable_dhcp']: return # check if the subnet is attached to a router port_filters = {'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF], 'network_id': [original_port['network_id']]} interfaces = self.get_ports(context.elevated(), filters=port_filters) router_found 
= False for interface in interfaces: if interface['fixed_ips'][0]['subnet_id'] == subnet_id: router_found = True break if not router_found: err_msg = _("Neutron is configured with DHCP_Relay but no router " "connected to the subnet") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def _cleanup_port(self, context, port_id, lport_id): super(NsxV3Plugin, self).delete_port(context, port_id) if lport_id: self.nsxlib.logical_port.delete(lport_id) def _assert_on_external_net_port_with_qos(self, port_data): # Prevent creating/update port with QoS policy # on external networks. if validators.is_attr_set(port_data.get(qos_consts.QOS_POLICY_ID)): err_msg = _("Unable to update/create a port with an external " "network and a QoS policy") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def _assert_on_illegal_port_with_qos(self, port_data, device_owner): # Prevent creating/update port with QoS policy # on router-interface/network-dhcp ports. if ((device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF or device_owner == const.DEVICE_OWNER_DHCP) and validators.is_attr_set(port_data.get(qos_consts.QOS_POLICY_ID))): err_msg = _("Unable to create or update %s port with a QoS " "policy") % device_owner LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def _assert_on_port_admin_state(self, port_data, device_owner): """Do not allow changing the admin state of some ports""" if (device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF or device_owner == l3_db.DEVICE_OWNER_ROUTER_GW): if port_data.get("admin_state_up") is False: err_msg = _("admin_state_up=False router ports are not " "supported.") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def _assert_on_port_sec_change(self, port_data, device_owner): """Do not allow enabling port security of some ports Trusted ports are created with port security disabled in neutron, and it should not change. 
""" if nlib_net.is_port_trusted({'device_owner': device_owner}): if port_data.get(psec.PORTSECURITY) is True: err_msg = _("port_security_enabled=True is not supported for " "trusted ports") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def _filter_ipv4_dhcp_fixed_ips(self, context, fixed_ips): ips = [] for fixed_ip in fixed_ips: if netaddr.IPNetwork(fixed_ip['ip_address']).version != 4: continue with db_api.context_manager.reader.using(context): subnet = self.get_subnet(context, fixed_ip['subnet_id']) if subnet['enable_dhcp']: ips.append(fixed_ip) return ips def _add_dhcp_binding(self, context, port): if not utils.is_port_dhcp_configurable(port): return dhcp_service = nsx_db.get_nsx_service_binding( context.session, port['network_id'], nsxlib_consts.SERVICE_DHCP) if not dhcp_service: return for fixed_ip in self._filter_ipv4_dhcp_fixed_ips( context, port['fixed_ips']): binding = self._add_dhcp_binding_on_server( context, dhcp_service['nsx_service_id'], fixed_ip['subnet_id'], fixed_ip['ip_address'], port) try: nsx_db.add_neutron_nsx_dhcp_binding( context.session, port['id'], fixed_ip['subnet_id'], fixed_ip['ip_address'], dhcp_service['nsx_service_id'], binding['id']) except (db_exc.DBError, sql_exc.TimeoutError): LOG.error("Failed to add mapping of DHCP binding " "%(binding)s for port %(port)s, deleting" "DHCP binding on server", {'binding': binding['id'], 'port': port['id']}) self._delete_dhcp_binding_on_server(context, binding) def _validate_extra_dhcp_options(self, opts): if not opts or not cfg.CONF.nsx_v3.native_dhcp_metadata: return for opt in opts: opt_name = opt['opt_name'] opt_val = opt['opt_value'] if opt_name == 'classless-static-route': # separate validation for option121 if opt_val is not None: try: net, ip = opt_val.split(',') except Exception: msg = (_("Bad value %(val)s for DHCP option " "%(name)s") % {'name': opt_name, 'val': opt_val}) raise n_exc.InvalidInput(error_message=msg) elif not 
self.nsxlib.dhcp_server.get_dhcp_opt_code(opt_name): msg = (_("DHCP option %s is not supported") % opt_name) raise n_exc.InvalidInput(error_message=msg) def _get_dhcp_options(self, context, ip, extra_dhcp_opts, net_id, subnet): # Always add option121. net_az = self.get_network_az_by_net_id(context, net_id) options = {'option121': {'static_routes': [ {'network': '%s' % net_az.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % net_az.native_metadata_route, 'next_hop': ip}]}} if subnet: sr, gateway_ip = self.nsxlib.native_dhcp.build_static_routes( subnet.get('gateway_ip'), subnet.get('cidr'), subnet.get('host_routes', [])) options['option121']['static_routes'].extend(sr) # Adding extra options only if configured on port if extra_dhcp_opts: other_opts = [] for opt in extra_dhcp_opts: opt_name = opt['opt_name'] if opt['opt_value'] is not None: # None value means - delete this option. Since we rebuild # the options from scratch, it can be ignored. opt_val = opt['opt_value'] if opt_name == 'classless-static-route': # Add to the option121 static routes net, ip = opt_val.split(',') options['option121']['static_routes'].append({ 'network': net, 'next_hop': ip}) else: other_opts.append({ 'code': self.nsxlib.dhcp_server.get_dhcp_opt_code( opt_name), 'values': [opt_val]}) if other_opts: options['others'] = other_opts return options def _add_dhcp_binding_on_server(self, context, dhcp_service_id, subnet_id, ip, port): try: hostname = 'host-%s' % ip.replace('.', '-') subnet = self.get_subnet(context, subnet_id) gateway_ip = subnet.get('gateway_ip') options = self._get_dhcp_options( context, ip, port.get(ext_edo.EXTRADHCPOPTS), port['network_id'], subnet) binding = self.nsxlib.dhcp_server.create_binding( dhcp_service_id, port['mac_address'], ip, hostname, cfg.CONF.nsx_v3.dhcp_lease_time, options, gateway_ip) LOG.debug("Created static binding (mac: %(mac)s, ip: %(ip)s, " "gateway: %(gateway)s, options: %(options)s) for port " "%(port)s on logical DHCP server 
%(server)s", {'mac': port['mac_address'], 'ip': ip, 'gateway': gateway_ip, 'options': options, 'port': port['id'], 'server': dhcp_service_id}) return binding except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.error("Unable to create static binding (mac: %(mac)s, " "ip: %(ip)s, gateway: %(gateway)s, options: " "%(options)s) for port %(port)s on logical DHCP " "server %(server)s", {'mac': port['mac_address'], 'ip': ip, 'gateway': gateway_ip, 'options': options, 'port': port['id'], 'server': dhcp_service_id}) def _delete_dhcp_binding(self, context, port): # Do not check device_owner here because Nova may have already # deleted that before Neutron's port deletion. bindings = nsx_db.get_nsx_dhcp_bindings(context.session, port['id']) for binding in bindings: self._delete_dhcp_binding_on_server(context, binding) try: nsx_db.delete_neutron_nsx_dhcp_binding( context.session, binding['port_id'], binding['nsx_binding_id']) except db_exc.DBError: LOG.error("Unable to delete mapping of DHCP binding " "%(binding)s for port %(port)s", {'binding': binding['nsx_binding_id'], 'port': binding['port_id']}) def _delete_dhcp_binding_on_server(self, context, binding): try: self.nsxlib.dhcp_server.delete_binding( binding['nsx_service_id'], binding['nsx_binding_id']) LOG.debug("Deleted static binding for port %(port)s) on " "logical DHCP server %(server)s", {'port': binding['port_id'], 'server': binding['nsx_service_id']}) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.error("Unable to delete static binding for port " "%(port)s) on logical DHCP server %(server)s", {'port': binding['port_id'], 'server': binding['nsx_service_id']}) def _find_dhcp_binding(self, subnet_id, ip_address, bindings): for binding in bindings: if (subnet_id == binding['subnet_id'] and ip_address == binding['ip_address']): return binding def _update_dhcp_binding(self, context, old_port, new_port): # First check if any IPv4 address in fixed_ips is changed. 
# Then update DHCP server setting or DHCP static binding # depending on the port type. # Note that Neutron allows a port with multiple IPs in the # same subnet. But backend DHCP server may not support that. if (utils.is_port_dhcp_configurable(old_port) != utils.is_port_dhcp_configurable(new_port)): # Note that the device_owner could be changed, # but still needs DHCP binding. if utils.is_port_dhcp_configurable(old_port): self._delete_dhcp_binding(context, old_port) else: self._add_dhcp_binding(context, new_port) return # Collect IPv4 DHCP addresses from original and updated fixed_ips # in the form of [(subnet_id, ip_address)]. old_fixed_ips = set([(fixed_ip['subnet_id'], fixed_ip['ip_address']) for fixed_ip in self._filter_ipv4_dhcp_fixed_ips( context, old_port['fixed_ips'])]) new_fixed_ips = set([(fixed_ip['subnet_id'], fixed_ip['ip_address']) for fixed_ip in self._filter_ipv4_dhcp_fixed_ips( context, new_port['fixed_ips'])]) # Find out the subnet/IP differences before and after the update. ips_to_add = list(new_fixed_ips - old_fixed_ips) ips_to_delete = list(old_fixed_ips - new_fixed_ips) ip_change = (ips_to_add or ips_to_delete) if old_port["device_owner"] == const.DEVICE_OWNER_DHCP and ip_change: # Update backend DHCP server address if the IP address of a DHCP # port is changed. if len(new_fixed_ips) != 1: msg = _("Can only configure one IP address on a DHCP server") LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) # Locate the backend DHCP server for this DHCP port. 
            dhcp_service = nsx_db.get_nsx_service_binding(
                context.session, old_port['network_id'],
                nsxlib_consts.SERVICE_DHCP)
            if dhcp_service:
                new_ip = ips_to_add[0][1]
                try:
                    self.nsxlib.dhcp_server.update(
                        dhcp_service['nsx_service_id'],
                        server_ip=new_ip)
                    LOG.debug("Updated IP %(ip)s for logical DHCP server "
                              "%(server)s",
                              {'ip': new_ip,
                               'server': dhcp_service['nsx_service_id']})
                except nsx_lib_exc.ManagerError:
                    with excutils.save_and_reraise_exception():
                        LOG.error("Unable to update IP %(ip)s for logical "
                                  "DHCP server %(server)s",
                                  {'ip': new_ip,
                                   'server': dhcp_service['nsx_service_id']})
        elif utils.is_port_dhcp_configurable(old_port):
            # Update static DHCP bindings for a compute port.
            bindings = nsx_db.get_nsx_dhcp_bindings(context.session,
                                                    old_port['id'])
            dhcp_opts = new_port.get(ext_edo.EXTRADHCPOPTS)
            dhcp_opts_changed = (old_port[ext_edo.EXTRADHCPOPTS] !=
                                 new_port[ext_edo.EXTRADHCPOPTS])
            if ip_change:
                # If IP address is changed, update associated DHCP bindings,
                # metadata route, and default hostname.
                # Mac address (if changed) will be updated at the same time.
                if ([subnet_id for (subnet_id, ip) in ips_to_add] ==
                    [subnet_id for (subnet_id, ip) in ips_to_delete]):
                    # No change on subnet_id, just update corresponding IPs.
                    for i, (subnet_id, ip) in enumerate(ips_to_delete):
                        binding = self._find_dhcp_binding(subnet_id, ip,
                                                          bindings)
                        if binding:
                            subnet = self.get_subnet(context,
                                                     binding['subnet_id'])
                            self._update_dhcp_binding_on_server(
                                context, binding, new_port['mac_address'],
                                ips_to_add[i][1], old_port['network_id'],
                                dhcp_opts=dhcp_opts, subnet=subnet)
                            # Update DB IP
                            nsx_db.update_nsx_dhcp_bindings(context.session,
                                                            old_port['id'],
                                                            ip,
                                                            ips_to_add[i][1])
                else:
                    # Subnet changed: drop old bindings and create new ones.
                    for (subnet_id, ip) in ips_to_delete:
                        binding = self._find_dhcp_binding(subnet_id, ip,
                                                          bindings)
                        if binding:
                            self._delete_dhcp_binding_on_server(context,
                                                                binding)
                    if ips_to_add:
                        dhcp_service = nsx_db.get_nsx_service_binding(
                            context.session, new_port['network_id'],
                            nsxlib_consts.SERVICE_DHCP)
                        if dhcp_service:
                            for (subnet_id, ip) in ips_to_add:
                                self._add_dhcp_binding_on_server(
                                    context, dhcp_service['nsx_service_id'],
                                    subnet_id, ip, new_port)
            elif (old_port['mac_address'] != new_port['mac_address'] or
                  dhcp_opts_changed):
                # If only Mac address/dhcp opts is changed,
                # update it in all associated DHCP bindings.
                for binding in bindings:
                    subnet = self.get_subnet(context, binding['subnet_id'])
                    self._update_dhcp_binding_on_server(
                        context, binding, new_port['mac_address'],
                        binding['ip_address'], old_port['network_id'],
                        dhcp_opts=dhcp_opts, subnet=subnet)

    def _update_dhcp_binding_on_server(self, context, binding, mac, ip,
                                       net_id, gateway_ip=False,
                                       dhcp_opts=None, options=None,
                                       subnet=None):
        """Update one static binding on the backend DHCP server.

        gateway_ip defaults to False (meaning "do not touch") because None
        is a meaningful value that deletes the gateway.
        """
        try:
            data = {'mac_address': mac, 'ip_address': ip}
            if ip != binding['ip_address']:
                # IP changed: refresh the derived hostname and options too.
                data['host_name'] = 'host-%s' % ip.replace('.', '-')
                data['options'] = self._get_dhcp_options(
                    context, ip, dhcp_opts, net_id, subnet)
            elif (dhcp_opts is not None or
                  options is not None):
                data['options'] = self._get_dhcp_options(
                    context, ip, dhcp_opts, net_id, subnet)
            if gateway_ip is not False:
                # Note that None is valid for gateway_ip, means deleting it.
data['gateway_ip'] = gateway_ip self.nsxlib.dhcp_server.update_binding( binding['nsx_service_id'], binding['nsx_binding_id'], **data) LOG.debug("Updated static binding (mac: %(mac)s, ip: %(ip)s, " "gateway: %(gateway)s) for port %(port)s on " "logical DHCP server %(server)s", {'mac': mac, 'ip': ip, 'gateway': gateway_ip, 'port': binding['port_id'], 'server': binding['nsx_service_id']}) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.error("Unable to update static binding (mac: %(mac)s, " "ip: %(ip)s, gateway: %(gateway)s) for port " "%(port)s on logical DHCP server %(server)s", {'mac': mac, 'ip': ip, 'gateway': gateway_ip, 'port': binding['port_id'], 'server': binding['nsx_service_id']}) def _update_lport_with_security_groups(self, context, lport_id, original, updated): # translate the neutron sg ids to nsx ids, and call nsxlib nsx_origial = nsx_db.get_nsx_security_group_ids(context.session, original) nsx_updated = nsx_db.get_nsx_security_group_ids(context.session, updated) self.nsxlib.ns_group.update_lport( context, lport_id, nsx_origial, nsx_updated) def base_create_port(self, context, port): neutron_db = super(NsxV3Plugin, self).create_port(context, port) self._extension_manager.process_create_port( context, port['port'], neutron_db) return neutron_db def create_port(self, context, port, l2gw_port_check=False): port_data = port['port'] dhcp_opts = port_data.get(ext_edo.EXTRADHCPOPTS) self._validate_extra_dhcp_options(dhcp_opts) self._validate_max_ips_per_port(port_data.get('fixed_ips', []), port_data.get('device_owner')) self._assert_on_dhcp_relay_without_router(context, port_data) # TODO(salv-orlando): Undo logical switch creation on failure with db_api.context_manager.writer.using(context): is_external_net = self._network_is_external( context, port_data['network_id']) if is_external_net: self._assert_on_external_net_with_compute(port_data) self._assert_on_external_net_port_with_qos(port_data) 
            self._assert_on_illegal_port_with_qos(
                port_data, port_data.get('device_owner'))
            self._assert_on_port_admin_state(
                port_data, port_data.get('device_owner'))

            neutron_db = self.base_create_port(context, port)
            port["port"].update(neutron_db)

            (is_psec_on, has_ip, sgids, psgids) = (
                self._create_port_preprocess_security(context, port,
                                                      port_data, neutron_db))
            self._process_portbindings_create_and_update(
                context, port['port'], port_data)
            self._process_port_create_extra_dhcp_opts(
                context, port_data, dhcp_opts)

            # handle adding security groups to port
            self._process_port_create_security_group(
                context, port_data, sgids)
            self._process_port_create_provider_security_group(
                context, port_data, psgids)
            # add provider groups to other security groups list.
            # sgids is a set() so we need to | it in.
            if psgids:
                sgids = list(set(sgids) | set(psgids))
            self._extend_nsx_port_dict_binding(context, port_data)

            # Make sure mac_learning and port sec are not both enabled
            if (validators.is_attr_set(port_data.get(mac_ext.MAC_LEARNING)) and
                port_data.get(mac_ext.MAC_LEARNING)):
                if is_psec_on:
                    msg = _('Mac learning requires that port security be '
                            'disabled')
                    LOG.error(msg)
                    raise n_exc.InvalidInput(error_message=msg)
                self._create_mac_learning_state(context, port_data)
            elif mac_ext.MAC_LEARNING in port_data:
                # This is due to the fact that the default is
                # ATTR_NOT_SPECIFIED
                port_data.pop(mac_ext.MAC_LEARNING)

        # Operations to backend should be done outside of DB transaction.
        # NOTE(arosen): ports on external networks are nat rules and do
        # not result in ports on the backend.
        if not is_external_net:
            try:
                lport = self._create_port_at_the_backend(
                    context, port_data, l2gw_port_check, is_psec_on)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.error('Failed to create port %(id)s on NSX '
                              'backend. Exception: %(e)s',
                              {'id': neutron_db['id'], 'e': e})
                    # Roll back the neutron DB port created above.
                    self._cleanup_port(context, neutron_db['id'], None)

            if not self.nsxlib.feature_supported(
                    nsxlib_consts.FEATURE_DYNAMIC_CRITERIA):
                try:
                    self._update_lport_with_security_groups(
                        context, lport['id'], [], sgids or [])
                except Exception as e:
                    with excutils.save_and_reraise_exception(reraise=False):
                        LOG.debug("Couldn't associate port %s with "
                                  "one or more security-groups, reverting "
                                  "logical-port creation (%s).",
                                  port_data['id'], lport['id'])
                        self._cleanup_port(
                            context, neutron_db['id'], lport['id'])

                    # NOTE(arosen): this is to translate between nsxlib
                    # exceptions and the plugin exceptions. This should be
                    # later refactored.
                    if (e.__class__ is
                            nsx_lib_exc.SecurityGroupMaximumCapacityReached):
                        raise nsx_exc.SecurityGroupMaximumCapacityReached(
                            err_msg=e.msg)
                    else:
                        raise e

            try:
                net_id = port_data[pbin.VIF_DETAILS]['nsx-logical-switch-id']
                nsx_db.add_neutron_nsx_port_mapping(
                    context.session, neutron_db['id'],
                    net_id, lport['id'])
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.debug('Failed to update mapping %s on NSX '
                              'backend. Reverting port creation. '
                              'Exception: %s', neutron_db['id'], e)
                    self._cleanup_port(context, neutron_db['id'], lport['id'])

        # this extra lookup is necessary to get the
        # latest db model for the extension functions
        port_model = self._get_port(context, port_data['id'])
        resource_extend.apply_funcs('ports', port_data, port_model)
        self._remove_provider_security_groups_from_list(port_data)

        # Add Mac/IP binding to native DHCP server and neutron DB.
        if cfg.CONF.nsx_v3.native_dhcp_metadata:
            try:
                self._add_dhcp_binding(context, port_data)
            except nsx_lib_exc.ManagerError:
                # Rollback create port
                self.delete_port(context, port_data['id'],
                                 force_delete_dhcp=True)
                msg = _('Unable to create port. Please contact admin')
                LOG.exception(msg)
                raise nsx_exc.NsxPluginException(err_msg=msg)

        if not cfg.CONF.nsx_v3.native_dhcp_metadata:
            nsx_rpc.handle_port_metadata_access(self, context, neutron_db)
        kwargs = {'context': context, 'port': neutron_db}
        registry.notify(resources.PORT, events.AFTER_CREATE, self, **kwargs)
        return port_data

    def _pre_delete_port_check(self, context, port_id, l2gw_port_check):
        """Perform checks prior to deleting a port."""
        try:
            kwargs = {
                'context': context,
                'port_check': l2gw_port_check,
                'port_id': port_id,
            }
            # Send delete port notification to any interested service plugin
            registry.notify(
                resources.PORT, events.BEFORE_DELETE, self, **kwargs)
        except callback_exc.CallbackFailure as e:
            if len(e.errors) == 1:
                raise e.errors[0].error
            raise n_exc.ServicePortInUse(port_id=port_id, reason=e)

    def delete_port(self, context, port_id,
                    l3_port_check=True, l2gw_port_check=True,
                    force_delete_dhcp=False):
        """Delete a neutron port and its NSX backend artifacts."""
        # if needed, check to see if this is a port owned by
        # a l2 gateway. If so, we should prevent deletion here
        self._pre_delete_port_check(context, port_id, l2gw_port_check)
        # if needed, check to see if this is a port owned by
        # a l3 router. If so, we should prevent deletion here
        if l3_port_check:
            self.prevent_l3_port_deletion(context, port_id)
        port = self.get_port(context, port_id)
        # Prevent DHCP port deletion if native support is enabled
        if (cfg.CONF.nsx_v3.native_dhcp_metadata and
            not force_delete_dhcp and
            port['device_owner'] in [const.DEVICE_OWNER_DHCP]):
            msg = (_('Can not delete DHCP port %s') % port['id'])
            raise n_exc.BadRequest(resource='port', msg=msg)
        if not self._network_is_external(context, port['network_id']):
            _net_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
                context.session, port_id)
            self.nsxlib.logical_port.delete(nsx_port_id)
            if not self.nsxlib.feature_supported(
                    nsxlib_consts.FEATURE_DYNAMIC_CRITERIA):
                self._update_lport_with_security_groups(
                    context, nsx_port_id,
                    port.get(ext_sg.SECURITYGROUPS, []), [])
            if (not self.nsxlib.feature_supported(
                    nsxlib_consts.FEATURE_EXCLUDE_PORT_BY_TAG) and
                self._is_excluded_port(port.get('device_owner'),
                                       port.get('port_security_enabled'))):
                fs = self.nsxlib.firewall_section
                try:
                    fs.remove_member_from_fw_exclude_list(
                        nsx_port_id, nsxlib_consts.TARGET_TYPE_LOGICAL_PORT)
                except Exception as e:
                    # best-effort: exclude-list cleanup failure is logged only
                    LOG.warning("Unable to remove port from exclude list. "
                                "Reason: %s", e)
        self.disassociate_floatingips(context, port_id)

        # Remove Mac/IP binding from native DHCP server and neutron DB.
        if cfg.CONF.nsx_v3.native_dhcp_metadata:
            self._delete_dhcp_binding(context, port)
        else:
            nsx_rpc.handle_port_metadata_access(self, context, port,
                                                is_delete=True)
        super(NsxV3Plugin, self).delete_port(context, port_id)

    def _update_port_preprocess_security(
            self, context, port, id, updated_port, validate_port_sec=True):
        """Validate and persist port-security/SG state for a port update.

        Returns the updated_port dict with security attributes resolved.
        """
        delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
            port)
        has_addr_pairs = self._check_update_has_allowed_address_pairs(port)
        has_security_groups = self._check_update_has_security_groups(port)
        delete_security_groups = self._check_update_deletes_security_groups(
            port)

        # populate port_security setting
        port_data = port['port']
        if psec.PORTSECURITY not in port_data:
            updated_port[psec.PORTSECURITY] = \
                self._get_port_security_binding(context, id)
        has_ip = self._ip_on_port(updated_port)
        # validate port security and allowed address pairs
        if not updated_port[psec.PORTSECURITY]:
            # has address pairs in request
            if has_addr_pairs:
                raise addr_exc.AddressPairAndPortSecurityRequired()
            elif not delete_addr_pairs:
                # check if address pairs are in db
                updated_port[addr_apidef.ADDRESS_PAIRS] = (
                    self.get_allowed_address_pairs(context, id))
                if updated_port[addr_apidef.ADDRESS_PAIRS]:
                    raise addr_exc.AddressPairAndPortSecurityRequired()

        if delete_addr_pairs or has_addr_pairs:
            self._validate_address_pairs(
                updated_port[addr_apidef.ADDRESS_PAIRS])
            # delete address pairs and read them in
            self._delete_allowed_address_pairs(context, id)
            self._process_create_allowed_address_pairs(
                context, updated_port,
                updated_port[addr_apidef.ADDRESS_PAIRS])

        # No port security is allowed if the port belongs to an ENS TZ
        if (updated_port[psec.PORTSECURITY] and
            psec.PORTSECURITY in port_data and
            self._is_ens_tz_port(context, updated_port)):
            raise nsx_exc.NsxENSPortSecurity()

        # checks if security groups were updated adding/modifying
        # security groups, port security is set and port has ip
        provider_sgs_specified = self._provider_sgs_specified(updated_port)
        if (validate_port_sec and
            not (has_ip and updated_port[psec.PORTSECURITY])):
            if has_security_groups or provider_sgs_specified:
                LOG.error("Port has conflicting port security status and "
                          "security groups")
                raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups()
            # Update did not have security groups passed in. Check
            # that port does not have any security groups already on it.
            filters = {'port_id': [id]}
            security_groups = (
                super(NsxV3Plugin, self)._get_port_security_group_bindings(
                    context, filters)
            )
            if security_groups and not delete_security_groups:
                raise psec_exc.PortSecurityPortHasSecurityGroup()

        if delete_security_groups or has_security_groups:
            # delete the port binding and read it with the new rules.
            self._delete_port_security_group_bindings(context, id)
            sgids = self._get_security_groups_on_port(context, port)
            self._process_port_create_security_group(context, updated_port,
                                                     sgids)

        if psec.PORTSECURITY in port['port']:
            self._process_port_port_security_update(
                context, port['port'], updated_port)

        return updated_port

    def _get_resource_type_for_device_id(self, device_owner, device_id):
        # Map a device owner to the NSX tag scope used for its device id;
        # returns None for owners that get no tag.
        if device_owner in const.ROUTER_INTERFACE_OWNERS:
            return 'os-router-uuid'
        elif device_owner.startswith(const.DEVICE_OWNER_COMPUTE_PREFIX):
            return 'os-instance-uuid'

    def _update_port_on_backend(self, context, lport_id,
                                original_port, updated_port,
                                address_bindings,
                                switch_profile_ids):
        """Push a neutron port update to the NSX logical port."""
        original_device_owner = original_port.get('device_owner')
        original_device_id = original_port.get('device_id')
        updated_device_owner = updated_port.get('device_owner')
        updated_device_id = updated_port.get('device_id')
        tags_update = []
        if original_device_id != updated_device_id:
            # Determine if we need to update or drop the tag. If the
            # updated_device_id exists then the tag will be updated. This
            # is done using the updated port. If the updated_device_id does
            # not exist then we need to get the original resource type
            # from original_device_owner. This enables us to drop the tag.
            if updated_device_id:
                resource_type = self._get_resource_type_for_device_id(
                    updated_device_owner, updated_device_id)
            else:
                resource_type = self._get_resource_type_for_device_id(
                    original_device_owner, updated_device_id)
            if resource_type:
                tags_update = nsxlib_utils.add_v3_tag(
                    tags_update, resource_type, updated_device_id)

        if updated_device_owner in (original_device_owner,
                                    l3_db.DEVICE_OWNER_ROUTER_INTF,
                                    nsxlib_consts.BRIDGE_ENDPOINT):
            # no attachment change
            attachment_type = False
            vif_uuid = False
        elif updated_device_owner:
            # default attachment
            attachment_type = nsxlib_consts.ATTACHMENT_VIF
            vif_uuid = updated_port['id']
        else:
            # no attachment
            attachment_type = None
            vif_uuid = None

        name = self._get_port_name(context, updated_port)

        # Update exclude list if necessary
        updated_ps = updated_port.get('port_security_enabled')
        updated_excluded = self._is_excluded_port(updated_device_owner,
                                                  updated_ps)
        original_ps = original_port.get('port_security_enabled')
        original_excluded = self._is_excluded_port(original_device_owner,
                                                   original_ps)
        if updated_excluded != original_excluded:
            if self.nsxlib.feature_supported(
                    nsxlib_consts.FEATURE_EXCLUDE_PORT_BY_TAG):
                if updated_excluded:
                    tags_update.append({'scope': security.PORT_SG_SCOPE,
                                        'tag': nsxlib_consts.EXCLUDE_PORT})
                else:
                    tags_update.append({'scope': security.PORT_SG_SCOPE,
                                        'tag': None})
            else:
                fs = self.nsxlib.firewall_section
                if updated_excluded:
                    fs.add_member_to_fw_exclude_list(
                        lport_id, nsxlib_consts.TARGET_TYPE_LOGICAL_PORT)
                else:
                    fs.remove_member_from_fw_exclude_list(
                        lport_id, nsxlib_consts.TARGET_TYPE_LOGICAL_PORT)

        if self.nsxlib.feature_supported(
                nsxlib_consts.FEATURE_DYNAMIC_CRITERIA):
            tags_update += self.nsxlib.ns_group.get_lport_tags(
                updated_port.get(ext_sg.SECURITYGROUPS, []) +
                updated_port.get(provider_sg.PROVIDER_SECURITYGROUPS, []))
            # Only set the default section tag if there is no port security
            if not updated_excluded:
                tags_update.append({'scope': security.PORT_SG_SCOPE,
                                    'tag': NSX_V3_DEFAULT_SECTION})
            else:
                # Ensure that the 'exclude' tag is set
                if self.nsxlib.feature_supported(
                        nsxlib_consts.FEATURE_EXCLUDE_PORT_BY_TAG):
                    tags_update.append({'scope': security.PORT_SG_SCOPE,
                                        'tag': nsxlib_consts.EXCLUDE_PORT})
        else:
            self._update_lport_with_security_groups(
                context, lport_id,
                original_port.get(ext_sg.SECURITYGROUPS, []) +
                original_port.get(provider_sg.PROVIDER_SECURITYGROUPS, []),
                updated_port.get(ext_sg.SECURITYGROUPS, []) +
                updated_port.get(provider_sg.PROVIDER_SECURITYGROUPS, []))

        # Add availability zone profiles first (so that specific profiles will
        # override them)
        port_az = self.get_network_az_by_net_id(context,
                                                updated_port['network_id'])
        if port_az.switching_profiles_objs:
            switch_profile_ids = (port_az.switching_profiles_objs +
                                  switch_profile_ids)

        # Update the DHCP profile
        if (updated_device_owner == const.DEVICE_OWNER_DHCP and
            not self._is_ens_tz_net(context, updated_port['network_id'])):
            switch_profile_ids.append(self._dhcp_profile)

        # Update QoS switch profile
        orig_compute = original_device_owner.startswith(
            const.DEVICE_OWNER_COMPUTE_PREFIX)
        updated_compute = updated_device_owner.startswith(
            const.DEVICE_OWNER_COMPUTE_PREFIX)
        is_new_compute = updated_compute and not orig_compute
        qos_policy_id, qos_profile_id = self._get_port_qos_ids(context,
                                                               updated_port,
                                                               is_new_compute)
        if qos_profile_id is not None:
            switch_profile_ids.append(qos_profile_id)

        psec_is_on = self._get_port_security_profile_id() in switch_profile_ids

        address_pairs = updated_port.get(addr_apidef.ADDRESS_PAIRS)
        mac_learning_profile_set = (
            validators.is_attr_set(address_pairs) and address_pairs and
            psec_is_on)
        # Add mac_learning profile if it exists and is configured
        if (self._mac_learning_profile and
            (mac_learning_profile_set or
             updated_port.get(mac_ext.MAC_LEARNING) is True)):
            switch_profile_ids.append(self._mac_learning_profile)
            switch_profile_ids.append(self._no_switch_security)

        try:
            self.nsxlib.logical_port.update(
                lport_id, vif_uuid, name=name,
                attachment_type=attachment_type,
                admin_state=updated_port.get('admin_state_up'),
                address_bindings=address_bindings,
                switch_profile_ids=switch_profile_ids,
                tags_update=tags_update,
                description=updated_port.get('description'))
        except nsx_lib_exc.ManagerError as inst:
            # we may fail if the QoS is not supported for this port
            # (for example - transport zone with KVM)
            LOG.exception("Unable to update port on the backend: %s",
                          inst)
            msg = _("Unable to update port on the backend")
            raise nsx_exc.NsxPluginException(err_msg=msg)

        # Attach/Detach the QoS policies to the port in the neutron DB
        qos_com_utils.update_port_policy_binding(context,
                                                 updated_port['id'],
                                                 qos_policy_id)

    def _get_port_qos_ids(self, context, updated_port, is_new_compute):
        """Resolve the QoS policy/profile ids for an updated port."""
        # when a port is updated, get the current QoS policy/profile ids
        policy_id = None
        profile_id = None
        if (qos_consts.QOS_POLICY_ID in updated_port):
            policy_id = updated_port[qos_consts.QOS_POLICY_ID]
        else:
            # Look for the previous QoS policy
            policy_id = qos_com_utils.get_port_policy_id(
                context, updated_port['id'])
        # If the port is now a 'compute' port (attached to a vm) and
        # Qos policy was not configured on the port directly,
        # try to take it from the ports network
        if policy_id is None and is_new_compute:
            # check if the network of this port has a policy
            policy_id = qos_com_utils.get_network_policy_id(
                context, updated_port.get('network_id'))

        if policy_id is not None:
            profile_id = self._get_qos_profile_id(context, policy_id)

        return policy_id, profile_id

    def update_port(self, context, id, port):
        """Update a neutron port and its NSX backend logical port."""
        switch_profile_ids = None

        # Need to determine if we skip validations for port security.
        # This is the edge case when the subnet is deleted.
validate_port_sec = True fixed_ips = port['port'].get('fixed_ips', []) for fixed_ip in fixed_ips: if 'delete_subnet' in fixed_ip: validate_port_sec = False break with db_api.context_manager.writer.using(context): original_port = super(NsxV3Plugin, self).get_port(context, id) self._remove_provider_security_groups_from_list(original_port) port_data = port['port'] nsx_lswitch_id, nsx_lport_id = nsx_db.get_nsx_switch_and_port_id( context.session, id) is_external_net = self._network_is_external( context, original_port['network_id']) if is_external_net: self._assert_on_external_net_with_compute(port_data) self._assert_on_external_net_port_with_qos(port_data) self._assert_on_dhcp_relay_without_router(context, port_data, original_port) dhcp_opts = port_data.get(ext_edo.EXTRADHCPOPTS) self._validate_extra_dhcp_options(dhcp_opts) device_owner = (port_data['device_owner'] if 'device_owner' in port_data else original_port.get('device_owner')) self._assert_on_illegal_port_with_qos( port_data, device_owner) self._assert_on_port_admin_state(port_data, device_owner) self._assert_on_port_sec_change(port_data, device_owner) self._validate_max_ips_per_port( port_data.get('fixed_ips', []), device_owner) updated_port = super(NsxV3Plugin, self).update_port(context, id, port) self._extension_manager.process_update_port(context, port_data, updated_port) # copy values over - except fixed_ips as # they've already been processed port_data.pop('fixed_ips', None) updated_port.update(port_data) updated_port = self._update_port_preprocess_security( context, port, id, updated_port, validate_port_sec) self._update_extra_dhcp_opts_on_port(context, id, port, updated_port) sec_grp_updated = self.update_security_group_on_port( context, id, port, original_port, updated_port) self._process_port_update_provider_security_group( context, port, original_port, updated_port) (port_security, has_ip) = self._determine_port_security_and_has_ip( context, updated_port) self._process_portbindings_create_and_update( 
context, port_data, updated_port) self._extend_nsx_port_dict_binding(context, updated_port) mac_learning_state = updated_port.get(mac_ext.MAC_LEARNING) if mac_learning_state is not None: if port_security and mac_learning_state: msg = _('Mac learning requires that port security be ' 'disabled') LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) self._update_mac_learning_state(context, id, mac_learning_state) self._remove_provider_security_groups_from_list(updated_port) address_bindings = self._build_address_bindings(updated_port) if port_security and address_bindings: switch_profile_ids = [self._get_port_security_profile_id()] else: switch_profile_ids = [self._no_psec_profile_id] address_bindings = [] # update the port in the backend, only if it exists in the DB # (i.e not external net) if nsx_lport_id is not None: try: self._update_port_on_backend(context, nsx_lport_id, original_port, updated_port, address_bindings, switch_profile_ids) except (nsx_lib_exc.ManagerError, nsx_lib_exc.SecurityGroupMaximumCapacityReached) as e: # In case if there is a failure on NSX-v3 backend, rollback the # previous update operation on neutron side. LOG.exception("Unable to update NSX backend, rolling back " "changes on neutron") with excutils.save_and_reraise_exception(reraise=False): with db_api.context_manager.writer.using(context): super(NsxV3Plugin, self).update_port( context, id, {'port': original_port}) # revert allowed address pairs if port_security: orig_pair = original_port.get( addr_apidef.ADDRESS_PAIRS) updated_pair = updated_port.get( addr_apidef.ADDRESS_PAIRS) if orig_pair != updated_pair: self._delete_allowed_address_pairs(context, id) if orig_pair: self._process_create_allowed_address_pairs( context, original_port, orig_pair) if sec_grp_updated: self.update_security_group_on_port( context, id, {'port': original_port}, updated_port, original_port) # NOTE(arosen): this is to translate between nsxlib # exceptions and the plugin exceptions. 
This should be # later refactored. if (e.__class__ is nsx_lib_exc.SecurityGroupMaximumCapacityReached): raise nsx_exc.SecurityGroupMaximumCapacityReached( err_msg=e.msg) else: raise e # Update DHCP bindings. if cfg.CONF.nsx_v3.native_dhcp_metadata: self._update_dhcp_binding(context, original_port, updated_port) # Notifications must be sent after the above transaction is complete kwargs = { 'context': context, 'port': updated_port, 'mac_address_updated': False, 'original_port': original_port, } registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs) return updated_port def _extend_get_port_dict_qos_and_binding(self, context, port): # Not using the register api for this because we need the context self._extend_nsx_port_dict_binding(context, port) # add the qos policy id from the DB if 'id' in port: port[qos_consts.QOS_POLICY_ID] = qos_com_utils.get_port_policy_id( context, port['id']) def get_port(self, context, id, fields=None): port = super(NsxV3Plugin, self).get_port(context, id, fields=None) if 'id' in port: port_model = self._get_port(context, port['id']) resource_extend.apply_funcs('ports', port, port_model) self._extend_get_port_dict_qos_and_binding(context, port) self._remove_provider_security_groups_from_list(port) return db_utils.resource_fields(port, fields) def get_ports(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} with db_api.context_manager.reader.using(context): ports = ( super(NsxV3Plugin, self).get_ports( context, filters, fields, sorts, limit, marker, page_reverse)) # Add port extensions for port in ports: if 'id' in port: port_model = self._get_port(context, port['id']) resource_extend.apply_funcs('ports', port, port_model) self._extend_get_port_dict_qos_and_binding(context, port) self._remove_provider_security_groups_from_list(port) return (ports if not fields else [db_utils.resource_fields(port, fields) for port in ports]) def 
_get_external_attachment_info(self, context, router): gw_port = router.gw_port ipaddress = None netmask = None nexthop = None if gw_port: # gw_port may have multiple IPs, only configure the first one if gw_port.get('fixed_ips'): ipaddress = gw_port['fixed_ips'][0]['ip_address'] network_id = gw_port.get('network_id') if network_id: ext_net = self._get_network(context, network_id) if not ext_net.external: msg = (_("Network '%s' is not a valid external " "network") % network_id) raise n_exc.BadRequest(resource='router', msg=msg) if ext_net.subnets: ext_subnet = ext_net.subnets[0] netmask = str(netaddr.IPNetwork(ext_subnet.cidr).netmask) nexthop = ext_subnet.gateway_ip return (ipaddress, netmask, nexthop) def _get_tier0_uuid_by_router(self, context, router): network_id = router.gw_port_id and router.gw_port.network_id if not network_id: return network = self.get_network(context, network_id) if not network.get(pnet.PHYSICAL_NETWORK): return self._default_tier0_router else: return network.get(pnet.PHYSICAL_NETWORK) def _update_router_gw_info(self, context, router_id, info): router = self._get_router(context, router_id) org_tier0_uuid = self._get_tier0_uuid_by_router(context, router) org_enable_snat = router.enable_snat orgaddr, orgmask, _orgnexthop = ( self._get_external_attachment_info( context, router)) # Ensure that a router cannot have SNAT disabled if there are # floating IP's assigned if (info and 'enable_snat' in info and org_enable_snat != info.get('enable_snat') and info.get('enable_snat') is False and self.router_gw_port_has_floating_ips(context, router_id)): msg = _("Unable to set SNAT disabled. Floating IPs assigned.") raise n_exc.InvalidInput(error_message=msg) # TODO(berlin): For nonat use case, we actually don't need a gw port # which consumes one external ip. But after looking at the DB logic # and we need to make a big change so don't touch it at present. 
super(NsxV3Plugin, self)._update_router_gw_info( context, router_id, info, router=router) new_tier0_uuid = self._get_tier0_uuid_by_router(context, router) new_enable_snat = router.enable_snat newaddr, newmask, _newnexthop = ( self._get_external_attachment_info( context, router)) nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) # Remove router link port between tier1 and tier0 if tier0 router link # is removed or changed remove_router_link_port = (org_tier0_uuid and (not new_tier0_uuid or org_tier0_uuid != new_tier0_uuid)) # Remove SNAT rules for gw ip if gw ip is deleted/changed or # enable_snat is updated from True to False remove_snat_rules = (org_enable_snat and orgaddr and (newaddr != orgaddr or not new_enable_snat)) # Revocate bgp announce for nonat subnets if tier0 router link is # changed or enable_snat is updated from False to True revocate_bgp_announce = (not org_enable_snat and org_tier0_uuid and (new_tier0_uuid != org_tier0_uuid or new_enable_snat)) # Add router link port between tier1 and tier0 if tier0 router link is # added or changed to a new one add_router_link_port = (new_tier0_uuid and (not org_tier0_uuid or org_tier0_uuid != new_tier0_uuid)) # Add SNAT rules for gw ip if gw ip is add/changed or # enable_snat is updated from False to True add_snat_rules = (new_enable_snat and newaddr and (newaddr != orgaddr or not org_enable_snat)) # Bgp announce for nonat subnets if tier0 router link is changed or # enable_snat is updated from True to False bgp_announce = (not new_enable_snat and new_tier0_uuid and (new_tier0_uuid != org_tier0_uuid or not org_enable_snat)) # Advertise NAT routes if enable SNAT to support FIP. In the NoNAT # use case, only NSX connected routes need to be advertised. 
advertise_route_nat_flag = True if new_enable_snat else False advertise_route_connected_flag = True if not new_enable_snat else False if revocate_bgp_announce: # TODO(berlin): revocate bgp announce on org tier0 router pass if remove_snat_rules: self.nsxlib.router.delete_gw_snat_rules(nsx_router_id, orgaddr) if remove_router_link_port: self.nsxlib.router.remove_router_link_port( nsx_router_id, org_tier0_uuid) if add_router_link_port: # First update edge cluster info for router edge_cluster_uuid = self._get_edge_cluster(new_tier0_uuid) self.nsxlib.router.update_router_edge_cluster( nsx_router_id, edge_cluster_uuid) tags = self.nsxlib.build_v3_tags_payload( router, resource_type='os-neutron-rport', project_name=context.tenant_name) self.nsxlib.router.add_router_link_port(nsx_router_id, new_tier0_uuid, tags=tags) if add_snat_rules: # Add SNAT rules for all the subnets which are in different scope # than the gw gw_address_scope = self._get_network_address_scope( context, router.gw_port.network_id) subnets = self._find_router_subnets(context.elevated(), router_id) for subnet in subnets: self._add_subnet_snat_rule(context, router_id, nsx_router_id, subnet, gw_address_scope, newaddr) if bgp_announce: # TODO(berlin): bgp announce on new tier0 router pass self.nsxlib.router.update_advertisement(nsx_router_id, advertise_route_nat_flag, advertise_route_connected_flag) def _add_subnet_snat_rule(self, context, router_id, nsx_router_id, subnet, gw_address_scope, gw_ip): # if the subnets address scope is the same as the gateways: # no need for SNAT if gw_address_scope: subnet_address_scope = self._get_subnetpool_address_scope( context, subnet['subnetpool_id']) if (gw_address_scope == subnet_address_scope): LOG.info("No need for SNAT rule for router %(router)s " "and subnet %(subnet)s because they use the " "same address scope %(addr_scope)s.", {'router': router_id, 'subnet': subnet['id'], 'addr_scope': gw_address_scope}) return self.nsxlib.router.add_gw_snat_rule(nsx_router_id, 
gw_ip, source_net=subnet['cidr'], bypass_firewall=False) def _process_extra_attr_router_create(self, context, router_db, r): for extra_attr in l3_attrs_db.get_attr_info().keys(): if (extra_attr in r and validators.is_attr_set(r.get(extra_attr))): self.set_extra_attr_value(context, router_db, extra_attr, r[extra_attr]) def _assert_on_router_admin_state(self, router_data): if router_data.get("admin_state_up") is False: err_msg = _("admin_state_up=False routers are not supported.") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def validate_router_dhcp_relay(self, context): """Fail router creation dhcp relay is configured without IPAM""" if (self._availability_zones_data.dhcp_relay_configured() and cfg.CONF.ipam_driver == 'internal'): err_msg = _("Neutron is configured with DHCP_Relay but no IPAM " "plugin configured.") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def create_router(self, context, router): r = router['router'] self.validate_router_dhcp_relay(context) # validate the availability zone if az_def.AZ_HINTS in r: self._validate_availability_zones_forced(context, 'router', r[az_def.AZ_HINTS]) gw_info = self._extract_external_gw(context, router, is_extract=True) r['id'] = (r.get('id') or uuidutils.generate_uuid()) tags = self.nsxlib.build_v3_tags_payload( r, resource_type='os-neutron-router-id', project_name=context.tenant_name) router = super(NsxV3Plugin, self).create_router(context, router) if az_def.AZ_HINTS in r: # Update the AZ hints in the neutron object az_hints = az_validator.convert_az_list_to_string( r[az_def.AZ_HINTS]) super(NsxV3Plugin, self).update_router( context, router['id'], {'router': {az_def.AZ_HINTS: az_hints}}) router_db = self._get_router(context, r['id']) with db_api.context_manager.writer.using(context): self._process_extra_attr_router_create(context, router_db, r) # Create backend entries here in case neutron DB exception # occurred during super.create_router(), which will cause # API retry 
and leaves dangling backend entries. try: result = self.nsxlib.logical_router.create( display_name=utils.get_name_and_uuid( router['name'] or 'router', router['id']), description=router.get('description'), tags=tags) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.error("Unable to create logical router for " "neutron router %s", router['id']) self.delete_router(context, router['id']) try: nsx_db.add_neutron_nsx_router_mapping( context.session, router['id'], result['id']) except db_exc.DBError: with excutils.save_and_reraise_exception(): LOG.error("Unable to create router mapping for " "router %s", router['id']) self.delete_router(context, router['id']) if gw_info and gw_info != const.ATTR_NOT_SPECIFIED: try: self._update_router_gw_info(context, router['id'], gw_info) except (db_exc.DBError, nsx_lib_exc.ManagerError): with excutils.save_and_reraise_exception(): LOG.error("Failed to set gateway info for router " "being created: %s - removing router", router['id']) self.delete_router(context, router['id']) LOG.info("Create router failed while setting external " "gateway. 
Router:%s has been removed from " "DB and backend", router['id']) return self.get_router(context, router['id']) def delete_router(self, context, router_id): if not cfg.CONF.nsx_v3.native_dhcp_metadata: nsx_rpc.handle_router_metadata_access(self, context, router_id, interface=None) router = self.get_router(context, router_id) if router.get(l3_apidef.EXTERNAL_GW_INFO): self._update_router_gw_info(context, router_id, {}) nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) ret_val = super(NsxV3Plugin, self).delete_router(context, router_id) # if delete was called due to create error, there might not be a # backend id if not nsx_router_id: return ret_val # Remove logical router from the NSX backend # It is safe to do now as db-level checks for resource deletion were # passed (and indeed the resource was removed from the Neutron DB try: self.nsxlib.logical_router.delete(nsx_router_id, force=True) except nsx_lib_exc.ResourceNotFound: # If the logical router was not found on the backend do not worry # about it. The conditions has already been logged, so there is no # need to do further logging pass except nsx_lib_exc.ManagerError: # if there is a failure in deleting the router do not fail the # operation, especially since the router object has already been # removed from the neutron DB. Take corrective steps to ensure the # resulting zombie object does not forward any traffic and is # eventually removed. LOG.warning("Backend router deletion for neutron router %s " "failed. 
The object was however removed from the " "Neutron database", router_id) return ret_val def get_router_availability_zones(self, router): """Return availability zones which a router belongs to.""" # add the hints to the structure first l3_attrs_db.ExtraAttributesMixin._extend_extra_router_dict( router, router) # get the availability zones from the hints return [self.get_router_az(router).name] def _validate_ext_routes(self, context, router_id, gw_info, new_routes): ext_net_id = (gw_info['network_id'] if validators.is_attr_set(gw_info) and gw_info else None) if not ext_net_id: port_filters = {'device_id': [router_id], 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_GW]} gw_ports = self.get_ports(context, filters=port_filters) if gw_ports: ext_net_id = gw_ports[0]['network_id'] if ext_net_id: subnets = self._get_subnets_by_network(context, ext_net_id) ext_cidrs = [subnet['cidr'] for subnet in subnets] for route in new_routes: if netaddr.all_matching_cidrs( route['nexthop'], ext_cidrs): error_message = (_("route with destination %(dest)s have " "an external nexthop %(nexthop)s which " "can't be supported") % {'dest': route['destination'], 'nexthop': route['nexthop']}) LOG.error(error_message) raise n_exc.InvalidInput(error_message=error_message) def _update_router_wrapper(self, context, router_id, router): if cfg.CONF.api_replay_mode: # Only import mock if the reply mode is used import mock # NOTE(arosen): the mock.patch here is needed for api_replay_mode with mock.patch("neutron.plugins.common.utils._fixup_res_dict", side_effect=api_replay_utils._fixup_res_dict): return super(NsxV3Plugin, self).update_router( context, router_id, router) else: return super(NsxV3Plugin, self).update_router( context, router_id, router) def update_router(self, context, router_id, router): gw_info = self._extract_external_gw(context, router, is_extract=False) router_data = router['router'] self._assert_on_router_admin_state(router_data) # if setting this router as no-snat, make sure gw address 
scope match # those of the subnets if (validators.is_attr_set(gw_info) and not gw_info.get('enable_snat', cfg.CONF.enable_snat_by_default)): router_ports = self._get_router_interfaces(context, router_id) for port in router_ports: for fip in port['fixed_ips']: self._validate_address_scope_for_router_interface( context.elevated(), router_id, gw_info['network_id'], fip['subnet_id']) # VPNaaS need to be notified on router GW changes (there is currently # no matching upstream registration for this) if validators.is_attr_set(gw_info): vpn_plugin = directory.get_plugin(plugin_const.VPN) if vpn_plugin: vpn_driver = vpn_plugin.drivers[vpn_plugin.default_provider] vpn_driver.validate_router_gw_info(context, router_id, gw_info) nsx_router_id = None routes_added = [] routes_removed = [] try: if 'routes' in router_data: new_routes = router_data['routes'] self._validate_ext_routes(context, router_id, gw_info, new_routes) self._validate_routes(context, router_id, new_routes) old_routes = self._get_extra_routes_by_router_id( context, router_id) routes_added, routes_removed = helpers.diff_list_of_dict( old_routes, new_routes) nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) for route in routes_removed: self.nsxlib.router.delete_static_routes(nsx_router_id, route) for route in routes_added: self.nsxlib.router.add_static_routes(nsx_router_id, route) if 'name' in router_data: # Update the name of logical router. router_name = router_data['name'] or 'router' display_name = utils.get_name_and_uuid(router_name, router_id) nsx_router_id = nsx_router_id or nsx_db.get_nsx_router_id( context.session, router_id) self.nsxlib.logical_router.update(nsx_router_id, display_name=display_name) # Update the name of associated logical ports. 
filters = {'device_id': [router_id], 'device_owner': const.ROUTER_INTERFACE_OWNERS} ports = self.get_ports(context, filters=filters) for port in ports: nsx_s_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, port['id']) if nsx_port_id: name = utils.get_name_and_uuid( router_name, port['id'], tag='port') try: self.nsxlib.logical_port.update(nsx_port_id, None, name=name) except Exception as e: LOG.error("Unable to update port %(port_id)s. " "Reason: %(e)s", {'port_id': nsx_port_id, 'e': e}) if 'description' in router_data: nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) self.nsxlib.logical_router.update( nsx_router_id, description=router_data['description']) return self._update_router_wrapper(context, router_id, router) except nsx_lib_exc.ResourceNotFound: with db_api.context_manager.writer.using(context): router_db = self._get_router(context, router_id) router_db['status'] = const.NET_STATUS_ERROR raise nsx_exc.NsxPluginException( err_msg=(_("logical router %s not found at the backend") % router_id)) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): router_db = self._get_router(context, router_id) curr_status = router_db['status'] router_db['status'] = const.NET_STATUS_ERROR if nsx_router_id: for route in routes_added: self.nsxlib.router.delete_static_routes( nsx_router_id, route) for route in routes_removed: self.nsxlib.router.add_static_routes(nsx_router_id, route) router_db['status'] = curr_status def _get_nsx_router_and_fw_section(self, context, router_id): # find the backend router id in the DB nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) if nsx_router_id is None: msg = _("Didn't find nsx router for router %s") % router_id LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) # get the FW section id of the backend router try: section_id = self.nsxlib.logical_router.get_firewall_section_id( nsx_router_id) except Exception as e: msg = (_("Failed to find router 
firewall section for router " "%(id)s: %(e)s") % {'id': router_id, 'e': e}) LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) if section_id is None: msg = (_("Failed to find router firewall section for router " "%(id)s.") % {'id': router_id}) LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) return nsx_router_id, section_id def update_router_firewall(self, context, router_id): """Rewrite all the rules in the router edge firewall This method should be called on FWaaS v1/v2 updates, and on router interfaces changes. When FWaaS is disabled, there is no need to update the NSX router FW, as the default rule is allow-all. """ if (self.fwaas_callbacks and self.fwaas_callbacks.fwaas_enabled): # find all the relevant ports of the router for FWaaS v2 # TODO(asarfaty): Add vm ports as well ports = self._get_router_interfaces(context, router_id) nsx_router_id, section_id = self._get_nsx_router_and_fw_section( context, router_id) # let the fwaas callbacks update the router FW return self.fwaas_callbacks.update_router_firewall( context, self.nsxlib, router_id, ports, nsx_router_id, section_id) def _get_port_relay_servers(self, context, port_id, network_id=None): if not network_id: port = self.get_port(context, port_id) network_id = port['network_id'] net_az = self.get_network_az_by_net_id(context, network_id) return net_az.dhcp_relay_servers def _get_port_relay_services(self): # DHCP services: UDP 67, 68, 2535 #TODO(asarfaty): use configurable ports service1 = self.nsxlib.firewall_section.get_nsservice( nsxlib_consts.L4_PORT_SET_NSSERVICE, l4_protocol=nsxlib_consts.UDP, destination_ports=['67-68']) service2 = self.nsxlib.firewall_section.get_nsservice( nsxlib_consts.L4_PORT_SET_NSSERVICE, l4_protocol=nsxlib_consts.UDP, destination_ports=['2535']) return [service1, service2] def get_extra_fw_rules(self, context, router_id, port_id=None): """Return firewall rules that should be added to the router firewall This method should return a list of allow firewall 
rules that are required in order to enable different plugin features with north/south traffic. The returned rules will be added after the FWaaS rules, and before the default drop rule. if port_id is specified, only rules relevant for this router interface port should be returned, and the rules should be ingress/egress (but not both) and include the source/dest nsx logical port. """ extra_rules = [] # DHCP relay rules: # get the list of relevant relay servers elv_ctx = context.elevated() if port_id: relay_servers = self._get_port_relay_servers(elv_ctx, port_id) else: relay_servers = [] filters = {'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF], 'device_id': [router_id]} ports = self.get_ports(elv_ctx, filters=filters) for port in ports: port_relay_servers = self._get_port_relay_servers( elv_ctx, port['id'], network_id=port['network_id']) if port_relay_servers: relay_servers.extend(port_relay_servers) # Add rules to allow dhcp traffic relay servers if relay_servers: # if it is a single port, the source/dest is this logical switch if port_id: nsx_ls_id, _nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, port_id) port_target = [{'target_type': 'LogicalSwitch', 'target_id': nsx_ls_id}] else: port_target = None # translate the relay server ips to the firewall format relay_target = [] if self.fwaas_callbacks: relay_target = (self.fwaas_callbacks.fwaas_driver. 
translate_addresses_to_target(set(relay_servers))) dhcp_services = self._get_port_relay_services() # ingress rule extra_rules.append({ 'display_name': "DHCP Relay ingress traffic", 'action': nsxlib_consts.FW_ACTION_ALLOW, 'sources': relay_target, 'destinations': port_target, 'services': dhcp_services, 'direction': 'IN'}) # egress rule extra_rules.append({ 'display_name': "DHCP Relay egress traffic", 'action': nsxlib_consts.FW_ACTION_ALLOW, 'destinations': relay_target, 'sources': port_target, 'services': dhcp_services, 'direction': 'OUT'}) # VPN rules: vpn_plugin = directory.get_plugin(plugin_const.VPN) if vpn_plugin: vpn_driver = vpn_plugin.drivers[vpn_plugin.default_provider] vpn_rules = ( vpn_driver._generate_ipsecvpn_firewall_rules( self.plugin_type(), context, router_id=router_id)) if vpn_rules: extra_rules.extend(vpn_rules) return extra_rules def _get_ports_and_address_groups(self, context, router_id, network_id, exclude_sub_ids=None): exclude_sub_ids = [] if not exclude_sub_ids else exclude_sub_ids address_groups = [] ports = self._get_router_interface_ports_by_network( context, router_id, network_id) ports = [port for port in ports if port['fixed_ips'] and port['fixed_ips'][0]['subnet_id'] not in exclude_sub_ids] for port in ports: address_group = {} gateway_ip = port['fixed_ips'][0]['ip_address'] subnet = self.get_subnet(context, port['fixed_ips'][0]['subnet_id']) prefixlen = str(netaddr.IPNetwork(subnet['cidr']).prefixlen) address_group['ip_addresses'] = [gateway_ip] address_group['prefix_length'] = prefixlen address_groups.append(address_group) return (ports, address_groups) def _get_interface_network(self, context, interface_info): is_port, is_sub = self._validate_interface_info(interface_info) if is_port: net_id = self.get_port(context, interface_info['port_id'])['network_id'] elif is_sub: net_id = self.get_subnet(context, interface_info['subnet_id'])['network_id'] return net_id def _validate_multiple_subnets_routers(self, context, router_id, net_id): 
network = self.get_network(context, net_id) net_type = network.get(pnet.NETWORK_TYPE) if (net_type and not self.nsxlib.feature_supported( nsxlib_consts.FEATURE_VLAN_ROUTER_INTERFACE) and not self._is_overlay_network(context, net_id)): err_msg = (_("Only overlay networks can be attached to a logical " "router. Network %(net_id)s is a %(net_type)s based " "network") % {'net_id': net_id, 'net_type': net_type}) LOG.error(err_msg) raise n_exc.InvalidInput(error_message=err_msg) port_filters = {'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF], 'network_id': [net_id]} intf_ports = self.get_ports(context.elevated(), filters=port_filters) router_ids = [port['device_id'] for port in intf_ports if port['device_id']] if len(router_ids) > 0: err_msg = _("Only one subnet of network %(net_id)s can be " "attached to router, one subnet is already attached " "to router %(router_id)s") % { 'net_id': net_id, 'router_id': router_ids[0]} LOG.error(err_msg) if router_id in router_ids: # attach to the same router again raise n_exc.InvalidInput(error_message=err_msg) else: # attach to multiple routers raise l3_exc.RouterInterfaceAttachmentConflict(reason=err_msg) def _add_router_interface_wrapper(self, context, router_id, interface_info): if cfg.CONF.api_replay_mode: # Only import mock if the reply mode is used import mock # NOTE(arosen): the mock.patch here is needed for api_replay_mode with mock.patch("neutron.plugins.common.utils._fixup_res_dict", side_effect=api_replay_utils._fixup_res_dict): return super(NsxV3Plugin, self).add_router_interface( context, router_id, interface_info) else: return super(NsxV3Plugin, self).add_router_interface( context, router_id, interface_info) def add_router_interface(self, context, router_id, interface_info): net_id = self._get_interface_network(context, interface_info) with locking.LockManager.get_lock(str(net_id)): # disallow more than one subnets belong to same network being # attached to routers self._validate_multiple_subnets_routers(context, 
router_id, net_id) info = self._add_router_interface_wrapper(context, router_id, interface_info) try: subnet = self.get_subnet(context, info['subnet_ids'][0]) port = self.get_port(context, info['port_id']) network_id = subnet['network_id'] nsx_net_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, port['id']) router_db = self._get_router(context, router_id) gw_network_id = (router_db.gw_port.network_id if router_db.gw_port else None) # If it is a no-snat router, interface address scope must be the # same as the gateways if not router_db.enable_snat and gw_network_id: self._validate_address_scope_for_router_interface( context.elevated(), router_id, gw_network_id, subnet['id']) nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) _ports, address_groups = self._get_ports_and_address_groups( context, router_id, network_id) display_name = utils.get_name_and_uuid( subnet['name'] or 'subnet', subnet['id']) tags = self.nsxlib.build_v3_tags_payload( port, resource_type='os-neutron-rport-id', project_name=context.tenant_name) tags.append({'scope': 'os-subnet-id', 'tag': subnet['id']}) # Add the dhcp relay service to the NSX interface relay_service = None if subnet['enable_dhcp']: net_az = self.get_network_az_by_net_id(context, network_id) relay_service = net_az.dhcp_relay_service resource_type = None if not self._is_overlay_network(context, network_id): # The router can only be configured to be centralized if # GW network is attached if not gw_network_id: msg = _("A router attached to a VLAN backed network " "must have a external network assigned.") raise n_exc.InvalidInput(error_message=msg) resource_type = nsxlib_consts.LROUTERPORT_CENTRALIZED self.nsxlib.router.create_logical_router_intf_port_by_ls_id( logical_router_id=nsx_router_id, display_name=display_name, tags=tags, ls_id=nsx_net_id, logical_switch_port_id=nsx_port_id, address_groups=address_groups, relay_service_uuid=relay_service, resource_type=resource_type) if router_db.gw_port 
and not router_db.enable_snat: # TODO(berlin): Announce the subnet on tier0 if enable_snat # is False pass if not cfg.CONF.nsx_v3.native_dhcp_metadata: # Ensure the NSX logical router has a connection to a # 'metadata access' network (with a proxy listening on # its DHCP port), by creating it if needed. nsx_rpc.handle_router_metadata_access(self, context, router_id, interface=info) # add the SNAT rule for this interface if (router_db.enable_snat and gw_network_id and router_db.gw_port.get('fixed_ips')): gw_ip = router_db.gw_port['fixed_ips'][0]['ip_address'] gw_address_scope = self._get_network_address_scope( context, gw_network_id) self._add_subnet_snat_rule(context, router_id, nsx_router_id, subnet, gw_address_scope, gw_ip) # update firewall rules self.update_router_firewall(context, router_id) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Neutron failed to add_router_interface on " "router %s, and would try to rollback.", router_id) self.remove_router_interface( context, router_id, interface_info) return info def remove_router_interface(self, context, router_id, interface_info): subnet = None subnet_id = None port_id = None self._validate_interface_info(interface_info, for_removal=True) if 'port_id' in interface_info: port_id = interface_info['port_id'] # find subnet_id - it is need for removing the SNAT rule port = self._get_port(context, port_id) if port.get('fixed_ips'): subnet_id = port['fixed_ips'][0]['subnet_id'] self._confirm_router_interface_not_in_use( context, router_id, subnet_id) if not (port['device_owner'] in const.ROUTER_INTERFACE_OWNERS and port['device_id'] == router_id): raise l3_exc.RouterInterfaceNotFound( router_id=router_id, port_id=port_id) elif 'subnet_id' in interface_info: subnet_id = interface_info['subnet_id'] self._confirm_router_interface_not_in_use( context, router_id, subnet_id) subnet = self._get_subnet(context, subnet_id) rport_qry = context.session.query(models_v2.Port) ports = rport_qry.filter_by( 
device_id=router_id, device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF, network_id=subnet['network_id']) for p in ports: if p['fixed_ips'][0]['subnet_id'] == subnet_id: port_id = p['id'] break else: raise l3_exc.RouterInterfaceNotFoundForSubnet( router_id=router_id, subnet_id=subnet_id) try: # TODO(berlin): Revocate announce the subnet on tier0 if # enable_snat is False router_db = self._get_router(context, router_id) if router_db.gw_port and not router_db.enable_snat: pass nsx_net_id, _nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, port_id) subnet = self.get_subnet(context, subnet_id) ports, address_groups = self._get_ports_and_address_groups( context, router_id, subnet['network_id'], exclude_sub_ids=[subnet['id']]) nsx_router_id = nsx_db.get_nsx_router_id( context.session, router_id) if len(ports) >= 1: new_using_port_id = ports[0]['id'] _net_id, new_nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, new_using_port_id) self.nsxlib.logical_router_port.update_by_lswitch_id( nsx_router_id, nsx_net_id, linked_logical_switch_port_id={ 'target_id': new_nsx_port_id}, subnets=address_groups) else: self.nsxlib.logical_router_port.delete_by_lswitch_id( nsx_net_id) # try to delete the SNAT rule of this subnet if (router_db.gw_port and router_db.enable_snat and router_db.gw_port.get('fixed_ips')): gw_ip = router_db.gw_port['fixed_ips'][0]['ip_address'] self.nsxlib.router.delete_gw_snat_rule_by_source( nsx_router_id, gw_ip, subnet['cidr'], skip_not_found=True) except nsx_lib_exc.ResourceNotFound: LOG.error("router port on router %(router_id)s for net " "%(net_id)s not found at the backend", {'router_id': router_id, 'net_id': subnet['network_id']}) # inform the FWaaS that interface port was removed if self.fwaas_callbacks: self.fwaas_callbacks.delete_port(context, port_id) info = super(NsxV3Plugin, self).remove_router_interface( context, router_id, interface_info) if not cfg.CONF.nsx_v3.native_dhcp_metadata: # Ensure the connection to the 'metadata 
access network' is removed # (with the network) if this is the last DHCP-disabled subnet on # the router. nsx_rpc.handle_router_metadata_access(self, context, router_id) # update firewall rules self.update_router_firewall(context, router_id) return info def _update_lb_vip(self, port, vip_address): # update the load balancer virtual server's VIP with # floating ip, but don't add NAT rules device_id = port['device_id'] lb_tag = [{'scope': 'os-lbaas-lb-id', 'tag': device_id}] vs_list = self.nsxlib.search_by_tags( tags=lb_tag, resource_type='LbVirtualServer') if vs_list['results']: vs_client = self.nsxlib.load_balancer.virtual_server for vs in vs_list['results']: vs_client.update_virtual_server_with_vip(vs['id'], vip_address) def _create_floating_ip_wrapper(self, context, floatingip): if cfg.CONF.api_replay_mode: # Only import mock if the reply mode is used import mock # NOTE(arosen): the mock.patch here is needed for api_replay_mode with mock.patch("neutron.plugins.common.utils._fixup_res_dict", side_effect=api_replay_utils._fixup_res_dict): return super(NsxV3Plugin, self).create_floatingip( context, floatingip, initial_status=( const.FLOATINGIP_STATUS_ACTIVE if floatingip['floatingip']['port_id'] else const.FLOATINGIP_STATUS_DOWN)) else: return super(NsxV3Plugin, self).create_floatingip( context, floatingip, initial_status=( const.FLOATINGIP_STATUS_ACTIVE if floatingip['floatingip']['port_id'] else const.FLOATINGIP_STATUS_DOWN)) def create_floatingip(self, context, floatingip): new_fip = self._create_floating_ip_wrapper(context, floatingip) router_id = new_fip['router_id'] if not router_id: return new_fip port_id = floatingip['floatingip']['port_id'] if port_id: port_data = self.get_port(context, port_id) device_owner = port_data.get('device_owner') fip_address = new_fip['floating_ip_address'] if device_owner == const.DEVICE_OWNER_LOADBALANCERV2: try: self._update_lb_vip(port_data, fip_address) except nsx_lib_exc.ManagerError: with 
excutils.save_and_reraise_exception(): super(NsxV3Plugin, self).delete_floatingip( context, new_fip['id']) return new_fip try: nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) self.nsxlib.router.add_fip_nat_rules( nsx_router_id, new_fip['floating_ip_address'], new_fip['fixed_ip_address'], bypass_firewall=False) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): self.delete_floatingip(context, new_fip['id']) return new_fip def delete_floatingip(self, context, fip_id): fip = self.get_floatingip(context, fip_id) router_id = fip['router_id'] port_id = fip['port_id'] is_lb_port = False if port_id: port_data = self.get_port(context, port_id) device_owner = port_data.get('device_owner') fixed_ip_address = fip['fixed_ip_address'] if device_owner == const.DEVICE_OWNER_LOADBALANCERV2: # If the port is LB VIP port, after deleting the FIP, # update the virtual server VIP back to fixed IP. is_lb_port = True try: self._update_lb_vip(port_data, fixed_ip_address) except nsx_lib_exc.ManagerError as e: LOG.error("Exception when updating vip ip_address" "on vip_port %(port)s: %(err)s", {'port': port_id, 'err': e}) if router_id and not is_lb_port: try: nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) self.nsxlib.router.delete_fip_nat_rules( nsx_router_id, fip['floating_ip_address'], fip['fixed_ip_address']) except nsx_lib_exc.ResourceNotFound: LOG.warning("Backend NAT rules for fip: %(fip_id)s " "(ext_ip: %(ext_ip)s int_ip: %(int_ip)s) " "not found", {'fip_id': fip_id, 'ext_ip': fip['floating_ip_address'], 'int_ip': fip['fixed_ip_address']}) super(NsxV3Plugin, self).delete_floatingip(context, fip_id) def update_floatingip(self, context, fip_id, floatingip): old_fip = self.get_floatingip(context, fip_id) old_port_id = old_fip['port_id'] new_status = (const.FLOATINGIP_STATUS_ACTIVE if floatingip['floatingip'].get('port_id') else const.FLOATINGIP_STATUS_DOWN) new_fip = super(NsxV3Plugin, self).update_floatingip( context, 
fip_id, floatingip) router_id = new_fip['router_id'] new_port_id = new_fip['port_id'] try: is_lb_port = False if old_port_id: old_port_data = self.get_port(context, old_port_id) old_device_owner = old_port_data['device_owner'] old_fixed_ip = old_fip['fixed_ip_address'] if old_device_owner == const.DEVICE_OWNER_LOADBALANCERV2: is_lb_port = True self._update_lb_vip(old_port_data, old_fixed_ip) # Delete old router's fip rules if old_router_id is not None. if old_fip['router_id'] and not is_lb_port: try: old_nsx_router_id = nsx_db.get_nsx_router_id( context.session, old_fip['router_id']) self.nsxlib.router.delete_fip_nat_rules( old_nsx_router_id, old_fip['floating_ip_address'], old_fip['fixed_ip_address']) except nsx_lib_exc.ResourceNotFound: LOG.warning("Backend NAT rules for fip: %(fip_id)s " "(ext_ip: %(ext_ip)s int_ip: %(int_ip)s) " "not found", {'fip_id': old_fip['id'], 'ext_ip': old_fip['floating_ip_address'], 'int_ip': old_fip['fixed_ip_address']}) # Update LB VIP if the new port is LB port is_lb_port = False if new_port_id: new_port_data = self.get_port(context, new_port_id) new_device_owner = new_port_data['device_owner'] new_fip_address = new_fip['floating_ip_address'] if new_device_owner == const.DEVICE_OWNER_LOADBALANCERV2: is_lb_port = True self._update_lb_vip(new_port_data, new_fip_address) # TODO(berlin): Associating same FIP to different internal IPs # would lead to creating multiple times of FIP nat rules at the # backend. Let's see how to fix the problem latter. # Update current router's nat rules if router_id is not None. 
if router_id and not is_lb_port: nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) self.nsxlib.router.add_fip_nat_rules( nsx_router_id, new_fip['floating_ip_address'], new_fip['fixed_ip_address'], bypass_firewall=False) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): super(NsxV3Plugin, self).update_floatingip( context, fip_id, {'floatingip': {'port_id': old_port_id}}) self.update_floatingip_status(context, fip_id, const.FLOATINGIP_STATUS_ERROR) if new_fip['status'] != new_status: new_fip['status'] = new_status self.update_floatingip_status(context, fip_id, new_status) return new_fip def disassociate_floatingips(self, context, port_id): fip_qry = context.session.query(l3_db_models.FloatingIP) fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) for fip_db in fip_dbs: if not fip_db.router_id: continue try: nsx_router_id = nsx_db.get_nsx_router_id(context.session, fip_db.router_id) self.nsxlib.router.delete_fip_nat_rules( nsx_router_id, fip_db.floating_ip_address, fip_db.fixed_ip_address) except nsx_lib_exc.ResourceNotFound: LOG.warning("Backend NAT rules for fip: %(fip_id)s " "(ext_ip: %(ext_ip)s int_ip: %(int_ip)s) " "not found", {'fip_id': fip_db.id, 'ext_ip': fip_db.floating_ip_address, 'int_ip': fip_db.fixed_ip_address}) self.update_floatingip_status(context, fip_db.id, const.FLOATINGIP_STATUS_DOWN) super(NsxV3Plugin, self).disassociate_floatingips( context, port_id, do_notify=False) def _ensure_default_security_group(self, context, tenant_id): # NOTE(arosen): if in replay mode we'll create all the default # security groups for the user with their data so we don't # want this to be called. if (cfg.CONF.api_replay_mode is False): return super(NsxV3Plugin, self)._ensure_default_security_group( context, tenant_id) def _create_fw_section_for_secgroup(self, nsgroup, is_provider): # NOTE(arosen): if a security group is provider we want to # insert our rules at the top. 
operation = (nsxlib_consts.FW_INSERT_TOP if is_provider else nsxlib_consts.FW_INSERT_BEFORE) # security-group rules are located in a dedicated firewall section. firewall_section = ( self.nsxlib.firewall_section.create_empty( nsgroup.get('display_name'), nsgroup.get('description'), [nsgroup.get('id')], nsgroup.get('tags'), operation=operation, other_section=self.default_section)) return firewall_section def _create_security_group_backend_resources(self, secgroup): tags = self.nsxlib.build_v3_tags_payload( secgroup, resource_type='os-neutron-secgr-id', project_name=secgroup['tenant_id']) name = self.nsxlib.ns_group.get_name(secgroup) if self.nsxlib.feature_supported( nsxlib_consts.FEATURE_DYNAMIC_CRITERIA): tag_expression = ( self.nsxlib.ns_group.get_port_tag_expression( security.PORT_SG_SCOPE, secgroup['id'])) else: tag_expression = None ns_group = self.nsxlib.ns_group.create( name, secgroup['description'], tags, tag_expression) # security-group rules are located in a dedicated firewall section. firewall_section = self._create_fw_section_for_secgroup( ns_group, secgroup.get(provider_sg.PROVIDER)) return ns_group, firewall_section def _create_firewall_rules(self, context, section_id, nsgroup_id, logging_enabled, action, sg_rules): # since the nsxlib does not have access to the nsx db, # we need to provide a mapping for the remote nsgroup ids. 
ruleid_2_remote_nsgroup_map = {} for sg_rule in sg_rules: remote_nsgroup_id = None remote_group_id = sg_rule.get('remote_group_id') # skip unnecessary db access when possible if remote_group_id == sg_rule['security_group_id']: remote_nsgroup_id = nsgroup_id elif remote_group_id: remote_nsgroup_id = nsx_db.get_nsx_security_group_id( context.session, remote_group_id) ruleid_2_remote_nsgroup_map[sg_rule['id']] = remote_nsgroup_id return self.nsxlib.firewall_section.create_rules( context, section_id, nsgroup_id, logging_enabled, action, sg_rules, ruleid_2_remote_nsgroup_map) def _handle_api_replay_default_sg(self, context, secgroup_db): """Set default api-replay migrated SG as default manually""" if (secgroup_db['name'] == 'default'): # this is a default security group copied from another cloud # Ugly patch! mark it as default manually with context.session.begin(subtransactions=True): try: default_entry = securitygroup_model.DefaultSecurityGroup( security_group_id=secgroup_db['id'], project_id=secgroup_db['project_id']) context.session.add(default_entry) except Exception as e: LOG.error("Failed to mark migrated security group %(id)s " "as default %(e)s", {'id': secgroup_db['id'], 'e': e}) def create_security_group(self, context, security_group, default_sg=False): secgroup = security_group['security_group'] secgroup['id'] = secgroup.get('id') or uuidutils.generate_uuid() ns_group = {} firewall_section = {} if not default_sg: tenant_id = secgroup['tenant_id'] self._ensure_default_security_group(context, tenant_id) try: ns_group, firewall_section = ( self._create_security_group_backend_resources(secgroup)) # REVISIT(roeyc): Ideally, at this point we need not be under an # open db transactions, however, unittests fail if omitting # subtransactions=True. with db_api.context_manager.writer.using(context): # NOTE(arosen): a neutron security group be default adds rules # that allow egress traffic. 
We do not want this behavior for # provider security_groups if secgroup.get(provider_sg.PROVIDER) is True: secgroup_db = self.create_provider_security_group( context, security_group) else: secgroup_db = ( super(NsxV3Plugin, self).create_security_group( context, security_group, default_sg)) nsx_db.save_sg_mappings(context, secgroup_db['id'], ns_group['id'], firewall_section['id']) self._process_security_group_properties_create(context, secgroup_db, secgroup, default_sg) if cfg.CONF.api_replay_mode: self._handle_api_replay_default_sg(context, secgroup_db) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.exception("Unable to create security-group on the " "backend.") if ns_group: self.nsxlib.ns_group.delete(ns_group['id']) except Exception: with excutils.save_and_reraise_exception(): section_id = firewall_section.get('id') nsgroup_id = ns_group.get('id') LOG.debug("Neutron failed to create security-group, " "deleting backend resources: " "section %s, ns-group %s.", section_id, nsgroup_id) if nsgroup_id: self.nsxlib.ns_group.delete(nsgroup_id) if section_id: self.nsxlib.firewall_section.delete(section_id) try: sg_rules = secgroup_db['security_group_rules'] # skip if there are no rules in group. i.e provider case if sg_rules: # translate and creates firewall rules. 
logging = (cfg.CONF.nsx_v3.log_security_groups_allowed_traffic or secgroup.get(sg_logging.LOGGING, False)) action = (nsxlib_consts.FW_ACTION_DROP if secgroup.get(provider_sg.PROVIDER) else nsxlib_consts.FW_ACTION_ALLOW) rules = self._create_firewall_rules( context, firewall_section['id'], ns_group['id'], logging, action, sg_rules) self.save_security_group_rule_mappings(context, rules['rules']) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.exception("Failed to create backend firewall rules " "for security-group %(name)s (%(id)s), " "rolling back changes.", secgroup_db) # default security group deletion requires admin context if default_sg: context = context.elevated() super(NsxV3Plugin, self).delete_security_group( context, secgroup_db['id']) self.nsxlib.ns_group.delete(ns_group['id']) self.nsxlib.firewall_section.delete(firewall_section['id']) return secgroup_db def update_security_group(self, context, id, security_group): orig_secgroup = self.get_security_group( context, id, fields=['id', 'name', 'description']) with db_api.context_manager.writer.using(context): secgroup_res = ( super(NsxV3Plugin, self).update_security_group(context, id, security_group)) self._process_security_group_properties_update( context, secgroup_res, security_group['security_group']) try: nsgroup_id, section_id = nsx_db.get_sg_mappings( context.session, id) self.nsxlib.ns_group.update_on_backend( context, secgroup_res, nsgroup_id, section_id, cfg.CONF.nsx_v3.log_security_groups_allowed_traffic) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.exception("Failed to update security-group %(name)s " "(%(id)s), rolling back changes in " "Neutron.", orig_secgroup) super(NsxV3Plugin, self).update_security_group( context, id, {'security_group': orig_secgroup}) return secgroup_res def delete_security_group(self, context, id): self._prevent_non_admin_delete_provider_sg(context, id) nsgroup_id, section_id = nsx_db.get_sg_mappings( 
context.session, id) super(NsxV3Plugin, self).delete_security_group(context, id) self.nsxlib.firewall_section.delete(section_id) self.nsxlib.ns_group.delete(nsgroup_id) def create_security_group_rule(self, context, security_group_rule): bulk_rule = {'security_group_rules': [security_group_rule]} return self.create_security_group_rule_bulk(context, bulk_rule)[0] def create_security_group_rule_bulk(self, context, security_group_rules): sg_rules = security_group_rules['security_group_rules'] for r in sg_rules: self._check_local_ip_prefix(context, r['security_group_rule']) # Generate id for security group rule or use one sepecified, # if specified we are running in api-replay as server doesn't # allow id to be specified by default r['security_group_rule']['id'] = ( r['security_group_rule'].get('id') or uuidutils.generate_uuid()) with db_api.context_manager.writer.using(context): rules_db = (super(NsxV3Plugin, self).create_security_group_rule_bulk_native( context, security_group_rules)) for i, r in enumerate(sg_rules): self._process_security_group_rule_properties( context, rules_db[i], r['security_group_rule']) # NOTE(arosen): here are assuming that all of the security # group rules being added are part of the same security # group. We should be validating that this is the case though... sg_id = sg_rules[0]['security_group_rule']['security_group_id'] self._prevent_non_admin_delete_provider_sg(context, sg_id) security_group = self.get_security_group( context, sg_id) action = nsxlib_consts.FW_ACTION_ALLOW if security_group.get(provider_sg.PROVIDER) is True: # provider security groups are drop rules. 
action = nsxlib_consts.FW_ACTION_DROP sg_id = rules_db[0]['security_group_id'] nsgroup_id, section_id = nsx_db.get_sg_mappings(context.session, sg_id) logging_enabled = (cfg.CONF.nsx_v3.log_security_groups_allowed_traffic or self._is_security_group_logged(context, sg_id)) try: rules = self._create_firewall_rules( context, section_id, nsgroup_id, logging_enabled, action, rules_db) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): for rule in rules_db: super(NsxV3Plugin, self).delete_security_group_rule( context, rule['id']) self.save_security_group_rule_mappings(context, rules['rules']) return rules_db def delete_security_group_rule(self, context, id): rule_db = self._get_security_group_rule(context, id) sg_id = rule_db['security_group_id'] self._prevent_non_admin_delete_provider_sg(context, sg_id) nsgroup_id, section_id = nsx_db.get_sg_mappings(context.session, sg_id) fw_rule_id = nsx_db.get_sg_rule_mapping(context.session, id) self.nsxlib.firewall_section.delete_rule(section_id, fw_rule_id) super(NsxV3Plugin, self).delete_security_group_rule(context, id) def save_security_group_rule_mappings(self, context, firewall_rules): rules = [(rule['display_name'], rule['id']) for rule in firewall_rules] nsx_db.save_sg_rule_mappings(context.session, rules) def _list_availability_zones(self, context, filters=None): # If no native_dhcp_metadata - use neutron AZs if not cfg.CONF.nsx_v3.native_dhcp_metadata: return super(NsxV3Plugin, self)._list_availability_zones( context, filters=filters) #TODO(asarfaty): We may need to use the filters arg, but now it # is here only for overriding the original api result = {} for az in self._availability_zones_data.list_availability_zones(): # Add this availability zone as a network resource result[(az, 'network')] = True result[(az, 'router')] = True return result def _validate_availability_zones_forced(self, context, resource_type, availability_zones): return self.validate_availability_zones(context, resource_type, 
availability_zones, force=True) def validate_availability_zones(self, context, resource_type, availability_zones, force=False): # This method is called directly from this plugin but also from # registered callbacks if self._is_sub_plugin and not force: # validation should be done together for both plugins return # If no native_dhcp_metadata - use neutron AZs if not cfg.CONF.nsx_v3.native_dhcp_metadata: return super(NsxV3Plugin, self).validate_availability_zones( context, resource_type, availability_zones) # Validate against the configured AZs return self.validate_obj_azs(availability_zones) def get_network_availability_zones(self, net_db): if cfg.CONF.nsx_v3.native_dhcp_metadata: hints = az_validator.convert_az_string_to_list( net_db[az_def.AZ_HINTS]) # When using the configured AZs, the az will always be the same # as the hint (or default if none) if hints: az_name = hints[0] else: az_name = self.get_default_az().name return [az_name] else: return [] def recalculate_snat_rules_for_router(self, context, router, subnets): """Recalculate router snat rules for specific subnets. Invoked when subnetpool address scope changes. 
""" nsx_router_id = nsx_db.get_nsx_router_id(context.session, router['id']) if not router['external_gateway_info']: return LOG.info("Recalculating snat rules for router %s", router['id']) fip = router['external_gateway_info']['external_fixed_ips'][0] ext_addr = fip['ip_address'] gw_address_scope = self._get_network_address_scope( context, router['external_gateway_info']['network_id']) # TODO(annak): improve amount of backend calls by rebuilding all # snat rules when API is available for subnet in subnets: if gw_address_scope: subnet_address_scope = self._get_subnetpool_address_scope( context, subnet['subnetpool_id']) LOG.info("Deleting SNAT rule for %(router)s " "and subnet %(subnet)s", {'router': router['id'], 'subnet': subnet['id']}) # Delete rule for this router/subnet pair if it exists self.nsxlib.router.delete_gw_snat_rule_by_source( nsx_router_id, ext_addr, subnet['cidr'], skip_not_found=True) if (gw_address_scope != subnet_address_scope): # subnet is no longer under same address scope with GW LOG.info("Adding SNAT rule for %(router)s " "and subnet %(subnet)s", {'router': router['id'], 'subnet': subnet['id']}) self.nsxlib.router.add_gw_snat_rule( nsx_router_id, ext_addr, source_net=subnet['cidr'], bypass_firewall=False) vmware-nsx-12.0.1/vmware_nsx/plugins/__init__.py0000666000175100017510000000000013244523345021706 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/common/0000775000175100017510000000000013244524600021070 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/common/housekeeper/0000775000175100017510000000000013244524600023407 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/common/housekeeper/__init__.py0000666000175100017510000000000013244523345025515 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/common/housekeeper/housekeeper.py0000666000175100017510000000555313244523345026317 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import stevedore from neutron_lib import exceptions as n_exc from vmware_nsx.common import locking LOG = log.getLogger(__name__) ALL_DUMMY_JOB = { 'name': 'all', 'description': 'Execute all housekeepers', 'enabled': True} class NsxvHousekeeper(stevedore.named.NamedExtensionManager): def __init__(self, hk_ns, hk_jobs): self.readonly = cfg.CONF.nsxv.housekeeping_readonly if self.readonly: LOG.info('Housekeeper initialized in readonly mode') else: LOG.info('Housekeeper initialized') self.jobs = {} super(NsxvHousekeeper, self).__init__( hk_ns, hk_jobs, invoke_on_load=True, invoke_args=(self.readonly,)) LOG.info("Loaded housekeeping job names: %s", self.names()) for job in self: if job.obj.get_name() in cfg.CONF.nsxv.housekeeping_jobs: self.jobs[job.obj.get_name()] = job.obj def get(self, job_name): if job_name == ALL_DUMMY_JOB.get('name'): return ALL_DUMMY_JOB for job in self: name = job.obj.get_name() if job_name == name: return {'name': job_name, 'description': job.obj.get_description(), 'enabled': job_name in self.jobs} raise n_exc.ObjectNotFound(id=job_name) def list(self): results = [ALL_DUMMY_JOB] for job in self: job_name = job.obj.get_name() results.append({'name': job_name, 'description': job.obj.get_description(), 'enabled': job_name in self.jobs}) return results def run(self, context, job_name): if context.is_admin: with 
locking.LockManager.get_lock('nsx-housekeeper'): if job_name == ALL_DUMMY_JOB.get('name'): for job in self.jobs.values(): job.run(context) else: job = self.jobs.get(job_name) if job: job.run(context) else: raise n_exc.ObjectNotFound(id=job_name) else: raise n_exc.AdminRequired() vmware-nsx-12.0.1/vmware_nsx/plugins/common/housekeeper/base_job.py0000666000175100017510000000342113244523345025534 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import log import six LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class BaseJob(object): _core_plugin = None def __init__(self, readonly): self.readonly = readonly or (self.get_name() in cfg.CONF.nsxv.housekeeping_readonly_jobs) LOG.info('Housekeeping: %s job initialized in %s mode', self.get_name(), 'RO' if self.readonly else 'RW') @property def plugin(self): if not self._core_plugin: self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin() is True: # get the plugin that match this driver self._core_plugin = self.get_project_plugin( self._core_plugin) return self._core_plugin @abc.abstractmethod def get_name(self): pass @abc.abstractmethod def get_description(self): pass @abc.abstractmethod def run(self, context): pass @abc.abstractmethod def get_project_plugin(self, plugin): pass vmware-nsx-12.0.1/vmware_nsx/plugins/common/__init__.py0000666000175100017510000000000013244523345023176 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/common/plugin.py0000666000175100017510000003654713244523345022766 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging

from neutron.db import _resource_extend as resource_extend
from neutron.db import address_scope_db
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2
from neutron.db import l3_db
from neutron.db import models_v2
from neutron_lib.api.definitions import address_scope as ext_address_scope
from neutron_lib.api.definitions import availability_zone as az_def
from neutron_lib.api.definitions import network as net_def
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api.definitions import subnet as subnet_def
from neutron_lib.api import validators
from neutron_lib.api.validators import availability_zone as az_validator
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.utils import net

from vmware_nsx._i18n import _
from vmware_nsx.common import exceptions as nsx_exc

LOG = logging.getLogger(__name__)


@resource_extend.has_resource_extenders
class NsxPluginBase(db_base_plugin_v2.NeutronDbPluginV2,
                    address_scope_db.AddressScopeDbMixin):
    """Common methods for NSX-V and NSX-V3 plugins"""

    @property
    def plugin_type(self):
        # Overridden by each concrete plugin with its own type string.
        return "Unknown"

    @staticmethod
    @resource_extend.extends([net_def.COLLECTION_NAME])
    def _ext_extend_network_dict(result, netdb):
        """Let the extension manager extend the network dict."""
        ctx = n_context.get_admin_context()
        # get the core plugin as this is a static method with no 'self'
        plugin = directory.get_plugin()
        with db_api.context_manager.writer.using(ctx):
            plugin._extension_manager.extend_network_dict(
                ctx.session, netdb, result)

    @staticmethod
    @resource_extend.extends([port_def.COLLECTION_NAME])
    def _ext_extend_port_dict(result, portdb):
        """Let the extension manager extend the port dict."""
        ctx = n_context.get_admin_context()
        # get the core plugin as this is a static method with no 'self'
        plugin = directory.get_plugin()
        with db_api.context_manager.writer.using(ctx):
            plugin._extension_manager.extend_port_dict(
                ctx.session, portdb, result)

    @staticmethod
    @resource_extend.extends([subnet_def.COLLECTION_NAME])
    def _ext_extend_subnet_dict(result, subnetdb):
        """Let the extension manager extend the subnet dict."""
        ctx = n_context.get_admin_context()
        # get the core plugin as this is a static method with no 'self'
        plugin = directory.get_plugin()
        with db_api.context_manager.writer.using(ctx):
            plugin._extension_manager.extend_subnet_dict(
                ctx.session, subnetdb, result)

    def get_network_az_by_net_id(self, context, network_id):
        """Return the availability zone of a network.

        Falls back to the default AZ if the network cannot be fetched.
        """
        try:
            network = self.get_network(context, network_id)
        except Exception:
            return self.get_default_az()

        return self.get_network_az(network)

    def _get_router_interface_ports_by_network(
        self, context, router_id, network_id):
        """Return the router-interface ports of a router on a network."""
        port_filters = {'device_id': [router_id],
                        'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF],
                        'network_id': [network_id]}
        return self.get_ports(context, filters=port_filters)

    def get_router_for_floatingip(self, context, internal_port,
                                  internal_subnet, external_network_id):
        """Return the router id for a floating IP association.

        :raises InvalidInput: if the selected router has SNAT disabled,
            which the NSX backends do not support for floating IPs.
        """
        router_id = super(NsxPluginBase, self).get_router_for_floatingip(
            context, internal_port, internal_subnet, external_network_id)
        if router_id:
            router = self._get_router(context.elevated(), router_id)
            if not router.enable_snat:
                msg = _("Unable to assign a floating IP to a router that "
                        "has SNAT disabled")
                raise n_exc.InvalidInput(error_message=msg)
        return router_id

    def _get_network_address_scope(self, context, net_id):
        """Return the IPv4 address scope id of a network (or None)."""
        network = self.get_network(context, net_id)
        return network.get(ext_address_scope.IPV4_ADDRESS_SCOPE)

    def _get_subnet_address_scope(self, context, subnet_id):
        """Return the address scope id of a subnet's pool, if any."""
        subnet = self.get_subnet(context, subnet_id)
        if not subnet['subnetpool_id']:
            return
        subnetpool = self.get_subnetpool(context, subnet['subnetpool_id'])
        return subnetpool.get('address_scope_id', '')

    def _get_subnetpool_address_scope(self, context, subnetpool_id):
        """Return the address scope id of a subnet pool, if any."""
        if not subnetpool_id:
            return
        subnetpool = self.get_subnetpool(context, subnetpool_id)
        return subnetpool.get('address_scope_id', '')

    # TODO(asarfaty): the NSX-V3 needs a very similar code too
    def _validate_address_scope_for_router_interface(self, context, router_id,
                                                     gw_network_id,
                                                     subnet_id):
        """Validate that the GW address scope is the same as the interface"""
        gw_address_scope = self._get_network_address_scope(context,
                                                           gw_network_id)
        if not gw_address_scope:
            return
        subnet_address_scope = self._get_subnet_address_scope(context,
                                                              subnet_id)
        if (not subnet_address_scope or
            subnet_address_scope != gw_address_scope):
            raise nsx_exc.NsxRouterInterfaceDoesNotMatchAddressScope(
                router_id=router_id, address_scope_id=gw_address_scope)

    def _get_router_interfaces(self, context, router_id):
        """Return all router-interface ports of a router."""
        port_filters = {'device_id': [router_id],
                        'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
        return self.get_ports(context, filters=port_filters)

    def _find_router_subnets_cidrs(self, context, router_id):
        """Retrieve cidrs of subnets attached to the specified router."""
        subnets = self._find_router_subnets(context, router_id)
        return [subnet['cidr'] for subnet in subnets]

    def _find_router_subnets_cidrs_per_addr_scope(self, context, router_id):
        """Generate a list of cidrs per address pool.

        Go over all the router interface subnets.
        return a list of lists of subnets cidrs belonging to same
        address pool.
        """
        subnets = self._find_router_subnets(context, router_id)
        cidrs_map = {}
        for subnet in subnets:
            ads = self._get_subnetpool_address_scope(
                context, subnet['subnetpool_id']) or ''
            if ads not in cidrs_map:
                cidrs_map[ads] = []
            cidrs_map[ads].append(subnet['cidr'])
        return list(cidrs_map.values())

    def _get_port_by_device_id(self, context, device_id, device_owner):
        """Retrieve ports associated with a specific device id.

        Used for retrieving all neutron ports attached to a given router.
        """
        port_qry = context.session.query(models_v2.Port)
        return port_qry.filter_by(
            device_id=device_id,
            device_owner=device_owner,).all()

    def _find_router_subnets(self, context, router_id):
        """Retrieve subnets attached to the specified router."""
        ports = self._get_port_by_device_id(context, router_id,
                                            l3_db.DEVICE_OWNER_ROUTER_INTF)
        # No need to check for overlapping CIDRs
        subnets = []
        for port in ports:
            for ip in port.get('fixed_ips', []):
                subnet_qry = context.session.query(models_v2.Subnet)
                subnet = subnet_qry.filter_by(id=ip.subnet_id).one()
                subnets.append({'id': subnet.id,
                                'cidr': subnet.cidr,
                                'subnetpool_id': subnet.subnetpool_id,
                                'ip_version': subnet.ip_version})
        return subnets

    def _find_router_gw_subnets(self, context, router):
        """Retrieve external subnets attached to router GW"""
        if not router['external_gateway_info']:
            return []

        subnets = []
        for fip in router['external_gateway_info']['external_fixed_ips']:
            subnet = self.get_subnet(context, fip['subnet_id'])
            subnets.append(subnet)

        return subnets

    def recalculate_snat_rules_for_router(self, context, router, subnets):
        """Method to recalculate router snat rules for specific subnets.

        Invoked when subnetpool address scope changes.
        Implemented in child plugin classes
        """
        pass

    def recalculate_fw_rules_for_router(self, context, router, subnets):
        """Method to recalculate router FW rules for specific subnets.

        Invoked when subnetpool address scope changes.
        Implemented in child plugin classes
        """
        pass

    def _filter_subnets_by_subnetpool(self, subnets, subnetpool_id):
        """Return only the subnets allocated from the given pool."""
        return [subnet for subnet in subnets
                if subnet['subnetpool_id'] == subnetpool_id]

    def on_subnetpool_address_scope_updated(self, resource, event,
                                            trigger, **kwargs):
        """Callback: revisit router FW/SNAT rules on address scope change.

        For every router with subnets allocated from the changed pool,
        recalculate east-west FW rules and (when SNAT is enabled) SNAT
        rules via the child-plugin hooks above.
        """
        context = kwargs['context']

        routers = self.get_routers(context)
        subnetpool_id = kwargs['subnetpool_id']
        elevated_context = context.elevated()
        LOG.info("Inspecting routers for potential configuration changes "
                 "due to address scope change on subnetpool %s",
                 subnetpool_id)
        for rtr in routers:
            subnets = self._find_router_subnets(elevated_context,
                                                rtr['id'])
            gw_subnets = self._find_router_gw_subnets(elevated_context,
                                                      rtr)
            affected_subnets = self._filter_subnets_by_subnetpool(
                subnets, subnetpool_id)
            affected_gw_subnets = self._filter_subnets_by_subnetpool(
                gw_subnets, subnetpool_id)

            if not affected_subnets and not affected_gw_subnets:
                # No subnets were affected by address scope change
                continue

            if (affected_subnets == subnets and
                affected_gw_subnets == gw_subnets):
                # All subnets remain under the same address scope
                # (all router subnets were allocated from subnetpool_id)
                continue

            # Update east-west FW rules
            self.recalculate_fw_rules_for_router(context, rtr,
                                                 affected_subnets)

            if not rtr['external_gateway_info']:
                continue

            if not rtr['external_gateway_info']['enable_snat']:
                # FIX: original format string used "%(router)" with no
                # conversion type, which raises a formatting error when
                # the warning is emitted; it must be "%(router)s".
                LOG.warning("Due to address scope change on subnetpool "
                            "%(subnetpool)s, uniqueness on interface "
                            "addresses on no-snat router %(router)s is no "
                            "longer guaranteed, which may result in faulty "
                            "operation.", {'subnetpool': subnetpool_id,
                                           'router': rtr['id']})
                continue

            if affected_gw_subnets:
                # GW address scope have changed - we need to revisit snat
                # rules for all router interfaces
                affected_subnets = subnets

            self.recalculate_snat_rules_for_router(context, rtr,
                                                   affected_subnets)

    def _validate_max_ips_per_port(self, fixed_ip_list, device_owner):
        """Validate the number of fixed ips on a port

        Do not allow multiple ip addresses on a port since the nsx
        backend cannot add multiple static dhcp bindings with the
        same port
        """
        if (device_owner and
            net.is_port_trusted({'device_owner': device_owner})):
            return

        if validators.is_attr_set(fixed_ip_list) and len(fixed_ip_list) > 1:
            msg = _('Exceeded maximum amount of fixed ips per port')
            raise n_exc.InvalidInput(error_message=msg)

    def _extract_external_gw(self, context, router, is_extract=True):
        """Validate and (optionally) pop external_gateway_info.

        :param is_extract: when True the gateway info is removed from the
            request body so it can be applied separately (e.g. before the
            edge is deployed).
        :returns: the gateway info dict, or ATTR_NOT_SPECIFIED.
        """
        r = router['router']
        gw_info = constants.ATTR_NOT_SPECIFIED
        # First extract the gateway info in case of updating
        # gateway before edge is deployed.
        if 'external_gateway_info' in r:
            gw_info = r.get('external_gateway_info', {})
            if is_extract:
                del r['external_gateway_info']
            network_id = (gw_info.get('network_id') if gw_info
                          else None)
            if network_id:
                ext_net = self._get_network(context.elevated(), network_id)
                if not ext_net.external:
                    msg = (_("Network '%s' is not a valid external network") %
                           network_id)
                    raise n_exc.BadRequest(resource='router', msg=msg)

                subnets = self._get_subnets_by_network(context.elevated(),
                                                       network_id)
                if not subnets:
                    msg = _("Cannot update gateway on Network '%s' "
                            "with no subnet") % network_id
                    raise n_exc.BadRequest(resource='router', msg=msg)
        return gw_info

    def get_subnets_by_network(self, context, network_id):
        """Return subnet dicts of a network (using an elevated context)."""
        return [self._make_subnet_dict(subnet_obj) for subnet_obj in
                self._get_subnets_by_network(context.elevated(), network_id)]

    def _validate_routes(self, context, router_id, routes):
        """Reject default routes, which NSX-v/v3 do not support."""
        super(NsxPluginBase, self)._validate_routes(
            context, router_id, routes)
        # do not allow adding a default route. NSX-v/v3 don't support it
        for route in routes:
            if route.get('destination', '').startswith('0.0.0.0/'):
                msg = _("Cannot set a default route using static routes")
                raise n_exc.BadRequest(resource='router', msg=msg)

    @staticmethod
    @resource_extend.extends([net_def.COLLECTION_NAME])
    def _extend_availability_zone_hints(net_res, net_db):
        """Expose the AZ hints DB string as a list on the network dict."""
        net_res[az_def.AZ_HINTS] = az_validator.convert_az_string_to_list(
            net_db[az_def.AZ_HINTS])

    def _validate_external_subnet(self, context, network_id):
        """Reject enabling DHCP on a subnet of an external network."""
        filters = {'id': [network_id], 'router:external': [True]}
        nets = self.get_networks(context, filters=filters)
        if len(nets) > 0:
            err_msg = _("Can not enable DHCP on external network")
            raise n_exc.InvalidInput(error_message=err_msg)


# Register the callback
def _validate_network_has_subnet(resource, event, trigger, **kwargs):
    """Callback: reject setting a router GW on a subnet-less network."""
    network_id = kwargs.get('network_id')
    subnets = kwargs.get('subnets')
    if not subnets:
        msg = _('No subnet defined on network %s') % network_id
        raise n_exc.InvalidInput(error_message=msg)


def subscribe():
    """Subscribe module-level callbacks to neutron registry events."""
    registry.subscribe(_validate_network_has_subnet,
                       resources.ROUTER_GATEWAY, events.BEFORE_CREATE)


subscribe()

# (next archive member: vmware_nsx/plugins/nsx/utils.py)
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg

from neutron_lib import context as n_context
from neutron_lib import exceptions
from neutron_lib.plugins import directory

from vmware_nsx.db import db as nsx_db


def is_tvd_core_plugin():
    """Return True when the configured core plugin is the NSX-TVD plugin."""
    core_plugin = cfg.CONF.core_plugin
    if (core_plugin.endswith('NsxTVDPlugin') or
        core_plugin.endswith('vmware_nsxtvd')):
        return True
    return False


def get_tvd_plugin_type_for_project(project_id, context=None):
    """Get the plugin type used by a project

    Raise an exception if not found or the plugin is not in use
    """
    if not context:
        context = n_context.get_admin_context()
    core_plugin = directory.get_plugin()
    return core_plugin.get_plugin_type_from_project(context, project_id)


def filter_plugins(cls):
    """
    Class decorator to separate the results of each of the given methods
    by plugin
    """

    def get_project_mapping(context, project_id):
        """Return the plugin associated with this project"""
        mapping = nsx_db.get_project_plugin_mapping(
            context.session, project_id)
        if mapping:
            return mapping['plugin']
        else:
            raise exceptions.ObjectNotFound(id=project_id)

    def add_separate_plugin_hook(name):
        # Wrap the named get-list method so that its results are
        # filtered to only the entries belonging to the requesting
        # project's plugin.
        orig_method = getattr(cls, name, None)

        def filter_results_by_plugin(self, context, **kwargs):
            """Run the original get-list method, and filter the results
            by the project id of the context
            """
            entries = orig_method(self, context, **kwargs)
            if not context.project_id:
                # No project on the request - nothing to filter by
                return entries
            req_p = get_project_mapping(context, context.project_id)
            # iterate over a copy so entries can be removed in-place
            for entry in entries[:]:
                if entry.get('tenant_id'):
                    p = get_project_mapping(context, entry['tenant_id'])
                    if p != req_p:
                        entries.remove(entry)
            return entries

        setattr(cls, name, filter_results_by_plugin)

    # cls.methods_to_separate lists the get-list methods to wrap
    for method in cls.methods_to_separate:
        add_separate_plugin_hook(method)

    return cls
VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import network as net_def from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import subnet as subnet_def from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context as n_context from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from neutron.db import _resource_extend as resource_extend from neutron.db import _utils as db_utils from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import api as db_api from neutron.db.availability_zone import router as router_az_db from neutron.db import external_net_db from neutron.db import extradhcpopt_db from neutron.db import extraroute_db from neutron.db import l3_db from neutron.db import l3_gwmode_db from neutron.db.models import l3 as l3_db_models from neutron.db.models import securitygroup as securitygroup_model # noqa from neutron.db import models_v2 from neutron.db import portsecurity_db from neutron.db import quota_db # noqa from neutron.db import securitygroups_db from neutron.quota import resource_registry 
from neutron_lib.api import validators from neutron_lib import exceptions as n_exc from vmware_nsx.common import availability_zones as nsx_com_az from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import managers as nsx_managers from vmware_nsx.db import ( routertype as rt_rtr) from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsx_portbindings_db as pbin_db from vmware_nsx.extensions import advancedserviceproviders as as_providers from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.common import plugin as nsx_plugin_common from vmware_nsx.plugins.dvs import plugin as dvs from vmware_nsx.plugins.nsx_v import plugin as v from vmware_nsx.plugins.nsx_v3 import plugin as t from vmware_nsx.services.lbaas.nsx import lb_driver_v2 LOG = logging.getLogger(__name__) TVD_PLUGIN_TYPE = "Nsx-TVD" @resource_extend.has_resource_extenders class NsxTVDPlugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin, addr_pair_db.AllowedAddressPairsMixin, agents_db.AgentDbMixin, nsx_plugin_common.NsxPluginBase, rt_rtr.RouterType_mixin, external_net_db.External_net_db_mixin, extraroute_db.ExtraRoute_db_mixin, extradhcpopt_db.ExtraDhcpOptMixin, router_az_db.RouterAvailabilityZoneMixin, l3_gwmode_db.L3_NAT_db_mixin, pbin_db.NsxPortBindingMixin, portsecurity_db.PortSecurityDbMixin, securitygroups_db.SecurityGroupDbMixin, nsx_com_az.NSXAvailabilityZonesPluginCommon, projectpluginmap.ProjectPluginMapPluginBase): supported_extension_aliases = ['project-plugin-map'] __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True @resource_registry.tracked_resources( network=models_v2.Network, port=models_v2.Port, subnet=models_v2.Subnet, subnetpool=models_v2.SubnetPool, security_group=securitygroup_model.SecurityGroup, security_group_rule=securitygroup_model.SecurityGroupRule, router=l3_db_models.Router, floatingip=l3_db_models.FloatingIP) def __init__(self): 
self._extension_manager = nsx_managers.ExtensionManager() LOG.info("Start NSX TVD Plugin") # Validate configuration config.validate_nsx_config_options() super(NsxTVDPlugin, self).__init__() # init the different supported plugins self.init_plugins() # init the extensions supported by any of the plugins self.init_extensions() self.lbv2_driver = lb_driver_v2.EdgeLoadbalancerDriverV2() self._unsubscribe_callback_events() @staticmethod def plugin_type(): return TVD_PLUGIN_TYPE @staticmethod def is_tvd_plugin(): return True def _init_plugin(self, map_type, plugin_class): try: self.plugins[map_type] = plugin_class() except Exception as e: LOG.warning("%s plugin will not be supported: %s", map_type.upper(), e) if map_type == self.default_plugin: msg = (_("The default plugin %(def)s failed to start. " "Reason: %(reason)s") % {'def': self.default_plugin, 'reason': e}) LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) else: LOG.info("%s plugin will be supported", map_type.upper()) def init_plugins(self): # initialize all supported plugins self.plugins = {} self.as_providers = {} # update the default plugin for new projects self.default_plugin = cfg.CONF.nsx_tvd.default_plugin plugins = [(projectpluginmap.NsxPlugins.NSX_T, t.NsxV3Plugin), (projectpluginmap.NsxPlugins.NSX_V, v.NsxVPluginV2), (projectpluginmap.NsxPlugins.DVS, dvs.NsxDvsV2)] for (map_type, plugin_class) in plugins: self._init_plugin(map_type, plugin_class) if not len(self.plugins): msg = _("No active plugins were found") raise nsx_exc.NsxPluginException(err_msg=msg) for k, val in self.plugins.items(): if "advanced-service-providers" in val.supported_extension_aliases: self.as_providers[k] = val LOG.info("NSX-TVD plugin will use %s as the default plugin", self.default_plugin) # validate the availability zones configuration self.init_availability_zones() def get_plugin_by_type(self, plugin_type): return self.plugins.get(plugin_type) def init_extensions(self): # Support all the extensions supported by any 
of the plugins extensions = [] for plugin in self.plugins: extensions.extend(self.plugins[plugin].supported_extension_aliases) self.supported_extension_aliases.extend(list(set(extensions))) # mark extensions which are supported by only one of the plugins self._unsupported_fields = {} for plugin in self.plugins: # TODO(asarfaty): add other resources here plugin_type = self.plugins[plugin].plugin_type() self._unsupported_fields[plugin_type] = {'router': [], 'port': [], 'security_group': []} # router size and type are supported only by the V plugin if plugin_type in [t.NsxV3Plugin.plugin_type(), dvs.NsxDvsV2.plugin_type()]: self._unsupported_fields[plugin_type]['router'] = [ 'router_size', 'router_type'] # port mac learning, and provider sg are not supported by # the dvs plugin if plugin_type in [dvs.NsxDvsV2.plugin_type()]: self._unsupported_fields[plugin_type]['port'] = [ 'mac_learning_enabled', 'provider_security_groups'] # security group policy can be supported only by nsx-v if plugin_type in [t.NsxV3Plugin.plugin_type(), dvs.NsxDvsV2.plugin_type()]: self._unsupported_fields[plugin_type]['security_group'] = [ 'policy'] def init_availability_zones(self): # Make sure there are no overlaps between v/t availability zones if (self.plugins.get(projectpluginmap.NsxPlugins.NSX_V) and self.plugins.get(projectpluginmap.NsxPlugins.NSX_T) and bool(set(cfg.CONF.nsxv.availability_zones) & set(cfg.CONF.nsx_v3.availability_zones))): msg = _("Cannot use the same availability zones in NSX-V and T") raise nsx_exc.NsxPluginException(err_msg=msg) def _unsubscribe_callback_events(self): # unsubscribe the callback that should be called on all plugins # other that NSX-T. registry.unsubscribe_all( l3_db.L3_NAT_dbonly_mixin._prevent_l3_port_delete_callback) # Instead we will subscribe our internal callback. 
registry.subscribe(self._prevent_l3_port_delete_callback, resources.PORT, events.BEFORE_DELETE) @staticmethod def _prevent_l3_port_delete_callback(resource, event, trigger, **kwargs): """Register a callback to replace the default one This callback will prevent port deleting only if the port plugin is not NSX-T (in NSX-T plugin it was already handled) """ context = kwargs['context'] port_id = kwargs['port_id'] port_check = kwargs['port_check'] l3plugin = directory.get_plugin(plugin_constants.L3) if l3plugin and port_check: # if not nsx-t - call super code core_plugin = directory.get_plugin() db_port = core_plugin._get_port(context, port_id) p = core_plugin._get_plugin_from_net_id( context, db_port['network_id']) if p.plugin_type() != projectpluginmap.NsxPlugins.NSX_T: l3plugin.prevent_l3_port_deletion(context, port_id) def _validate_obj_extensions(self, data, plugin_type, obj_type): """prevent configuration of unsupported extensions""" for field in self._unsupported_fields[plugin_type][obj_type]: if validators.is_attr_set(data.get(field)): err_msg = (_('Can not support %(field)s extension for ' '%(obj_type)s %(p)s plugin') % { 'field': field, 'obj_type': obj_type, 'p': plugin_type}) raise n_exc.InvalidInput(error_message=err_msg) def _cleanup_obj_fields(self, data, plugin_type, obj_type): """Remove data of unsupported extensions""" for field in self._unsupported_fields[plugin_type][obj_type]: if field in data: del data[field] def _list_availability_zones(self, context, filters=None): p = self._get_plugin_for_request(context, filters) if p: return p._list_availability_zones(context, filters=filters) return [] def validate_availability_zones(self, context, resource_type, availability_zones): p = self._get_plugin_from_project(context, context.project_id) return p.validate_availability_zones(context, resource_type, availability_zones) def _get_plugin_from_net_id(self, context, net_id): # get the network using the super plugin - here we use the # _get_network (so as not 
to call the make dict method) network = self._get_network(context, net_id) return self._get_plugin_from_project(context, network['tenant_id']) def get_network_availability_zones(self, net_db): ctx = n_context.get_admin_context() p = self._get_plugin_from_project(ctx, net_db['tenant_id']) return p.get_network_availability_zones(net_db) def create_network(self, context, network): net_data = network['network'] tenant_id = net_data['tenant_id'] self._ensure_default_security_group(context, tenant_id) p = self._get_plugin_from_project(context, tenant_id) return p.create_network(context, network) @db_api.retry_if_session_inactive() def create_network_bulk(self, context, networks): #Implement create bulk so that the plugin calculation will be done once objects = [] items = networks['networks'] # look at the first network to find out the project & plugin net_data = items[0]['network'] tenant_id = net_data['tenant_id'] self._ensure_default_security_group(context, tenant_id) p = self._get_plugin_from_project(context, tenant_id) # create all networks one by one try: with db_api.context_manager.writer.using(context): for item in items: objects.append(p.create_network(context, item)) except Exception: with excutils.save_and_reraise_exception(): LOG.error("An exception occurred while creating " "the networks:%(item)s", {'item': item}) return objects def delete_network(self, context, id): p = self._get_plugin_from_net_id(context, id) p.delete_network(context, id) def get_network(self, context, id, fields=None): p = self._get_plugin_from_net_id(context, id) return p.get_network(context, id, fields=fields) def _get_plugin_for_request(self, context, filters, keys=None): project_id = context.project_id if filters: if filters.get('tenant_id'): project_id = filters.get('tenant_id') elif filters.get('project_id'): project_id = filters.get('project_id') else: # we have specific filters on the request. 
If those are # specific enough, we should not filter by project if filters.get('id'): return if keys: for key in keys: if filters.get(key): return # If there are multiple tenants/projects being requested then # we will not filter according to the plugin if isinstance(project_id, list): return return self._get_plugin_from_project(context, project_id) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters, keys=['shared']) filters = filters or {} with db_api.context_manager.reader.using(context): networks = ( super(NsxTVDPlugin, self).get_networks( context, filters, fields, sorts, limit, marker, page_reverse)) for net in networks[:]: p = self._get_plugin_from_project(context, net['tenant_id']) if p == req_p or req_p is None: p._extend_get_network_dict_provider(context, net) else: networks.remove(net) return (networks if not fields else [db_utils.resource_fields(network, fields) for network in networks]) def update_network(self, context, id, network): p = self._get_plugin_from_net_id(context, id) return p.update_network(context, id, network) def create_port(self, context, port): net_id = port['port']['network_id'] p = self._get_plugin_from_net_id(context, net_id) self._validate_obj_extensions( port['port'], p.plugin_type(), 'port') new_port = p.create_port(context, port) self._cleanup_obj_fields( new_port, p.plugin_type(), 'port') return new_port def update_port(self, context, id, port): db_port = self._get_port(context, id) p = self._get_plugin_from_net_id(context, db_port['network_id']) self._validate_obj_extensions( port['port'], p.plugin_type(), 'port') return p.update_port(context, id, port) def delete_port(self, context, id, **kwargs): db_port = self._get_port(context, id) p = self._get_plugin_from_net_id(context, db_port['network_id']) p.delete_port(context, id, **kwargs) def 
get_port(self, context, id, fields=None): db_port = self._get_port(context, id) p = self._get_plugin_from_net_id(context, db_port['network_id']) port = p.get_port(context, id, fields=fields) self._cleanup_obj_fields( port, p.plugin_type(), 'port') return port def get_ports(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters, keys=['device_id']) filters = filters or {} with db_api.context_manager.reader.using(context): ports = ( super(NsxTVDPlugin, self).get_ports( context, filters, fields, sorts, limit, marker, page_reverse)) # Add port extensions for port in ports[:]: port_model = None if 'id' in port: port_model = self._get_port(context, port['id']) resource_extend.apply_funcs('ports', port, port_model) p = self._get_plugin_from_net_id(context, port['network_id']) if p == req_p or req_p is None: if hasattr(p, '_extend_get_port_dict_qos_and_binding'): p._extend_get_port_dict_qos_and_binding(context, port) else: if not port_model: port_model = port p._extend_port_dict_binding(port, port_model) if hasattr(p, '_remove_provider_security_groups_from_list'): p._remove_provider_security_groups_from_list(port) self._cleanup_obj_fields( port, p.plugin_type(), 'port') else: ports.remove(port) return (ports if not fields else [db_utils.resource_fields(port, fields) for port in ports]) def _get_subnet_plugin_by_id(self, context, subnet_id): db_subnet = self._get_subnet(context, subnet_id) return self._get_plugin_from_net_id(context, db_subnet['network_id']) def get_subnet(self, context, id, fields=None): p = self._get_subnet_plugin_by_id(context, id) return p.get_subnet(context, id, fields=fields) def get_subnets(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Check if we need to invoke metadata search. 
Here we are unable to # filter according to projects as this is from the nova api service # so we invoke on all plugins that support this extension if ((fields and as_providers.ADV_SERVICE_PROVIDERS in fields) or (filters and filters.get(as_providers.ADV_SERVICE_PROVIDERS))): for plugin in self.as_providers.values(): subnets = plugin.get_subnets(context, filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) if subnets: return subnets return [] else: # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters) filters = filters or {} subnets = super(NsxTVDPlugin, self).get_subnets( context, filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) for subnet in subnets[:]: p = self._get_plugin_from_project(context, subnet['tenant_id']) if req_p and p != req_p: subnets.remove(subnet) return subnets def delete_subnet(self, context, id): p = self._get_subnet_plugin_by_id(context, id) p.delete_subnet(context, id) def _get_subnet_plugin(self, context, subnet_data): # get the plugin of the associated network net_id = subnet_data['network_id'] net_plugin = self._get_plugin_from_net_id(context, net_id) # make sure it matches the plugin of the current tenant tenant_id = subnet_data['tenant_id'] tenant_plugin = self._get_plugin_from_project(context, tenant_id) if tenant_plugin.plugin_type() != net_plugin.plugin_type(): err_msg = (_('Subnet should belong to the %s plugin ' 'as the network') % net_plugin.plugin_type()) raise n_exc.InvalidInput(error_message=err_msg) return net_plugin def create_subnet(self, context, subnet): p = self._get_subnet_plugin(context, subnet['subnet']) return p.create_subnet(context, subnet) def create_subnet_bulk(self, context, subnets): # look at the first subnet to find out the project & plugin items = subnets['subnets'] p = self._get_subnet_plugin(context, items[0]['subnet']) return 
    def update_subnet(self, context, id, subnet):
        """Update a subnet via the plugin that owns its network."""
        p = self._get_subnet_plugin_by_id(context, id)
        return p.update_subnet(context, id, subnet)

    def get_router_availability_zones(self, router):
        """Return the availability zones of a router.

        Delegates to the plugin mapped to the router's tenant; uses an
        admin context since only the router dict is available here.
        """
        ctx = n_context.get_admin_context()
        p = self._get_plugin_from_project(ctx, router['tenant_id'])
        return p.get_router_availability_zones(router)

    def _validate_router_gw_plugin(self, context, router_plugin,
                                   gw_info):
        # A router's external gateway network must be owned by the same
        # plugin as the router itself.
        if gw_info and gw_info.get('network_id'):
            net_plugin = self._get_plugin_from_net_id(
                context, gw_info['network_id'])
            if net_plugin.plugin_type() != router_plugin.plugin_type():
                err_msg = (_('Router gateway should belong to the %s plugin '
                             'as the router') % router_plugin.plugin_type())
                raise n_exc.InvalidInput(error_message=err_msg)

    def _validate_router_interface_plugin(self, context, router_plugin,
                                          interface_info):
        # The interface is specified either by port_id or by subnet_id;
        # resolve the network in both cases and verify plugin ownership.
        is_port, is_sub = self._validate_interface_info(interface_info)
        if is_port:
            net_id = self._get_port(
                context, interface_info['port_id'])['network_id']
        elif is_sub:
            net_id = self._get_subnet(
                context, interface_info['subnet_id'])['network_id']
        net_plugin = self._get_plugin_from_net_id(context, net_id)
        if net_plugin.plugin_type() != router_plugin.plugin_type():
            err_msg = (_('Router interface should belong to the %s plugin '
                         'as the router') % router_plugin.plugin_type())
            raise n_exc.InvalidInput(error_message=err_msg)

    def _get_plugin_from_router_id(self, context, router_id):
        # get the router using the super plugin - here we use the
        # _get_router (so as not to call the make dict method)
        router = self._get_router(context, router_id)
        return self._get_plugin_from_project(context, router['tenant_id'])
    def update_router(self, context, router_id, router):
        """Update a router via its owning plugin.

        Validates that any new gateway network and any plugin-specific
        extension fields match the router's plugin before delegating.
        """
        p = self._get_plugin_from_router_id(context, router_id)
        self._validate_router_gw_plugin(context, p, router['router'].get(
            'external_gateway_info'))
        self._validate_obj_extensions(
            router['router'], p.plugin_type(), 'router')
        return p.update_router(context, router_id, router)

    def get_router(self, context, id, fields=None):
        """Return a router via its owning plugin, with plugin-specific
        fields stripped for extensions the plugin does not support."""
        p = self._get_plugin_from_router_id(context, id)
        router = p.get_router(context, id, fields=fields)
        self._cleanup_obj_fields(router, p.plugin_type(), 'router')
        return router

    def delete_router(self, context, id):
        """Delete a router via its owning plugin."""
        p = self._get_plugin_from_router_id(context, id)
        p.delete_router(context, id)

    def add_router_interface(self, context, router_id, interface_info):
        """Attach an interface, verifying it belongs to the router's
        plugin first."""
        p = self._get_plugin_from_router_id(context, router_id)
        self._validate_router_interface_plugin(context, p, interface_info)
        return p.add_router_interface(context, router_id, interface_info)

    def remove_router_interface(self, context, router_id, interface_info):
        """Detach an interface via the router's owning plugin."""
        p = self._get_plugin_from_router_id(context, router_id)
        return p.remove_router_interface(context, router_id, interface_info)

    def _validate_fip_router_plugin(self, context, fip_plugin, fip_data):
        # A floating IP may only be associated with a router owned by
        # the same plugin as the floating network.
        if 'router_id' in fip_data:
            router_plugin = self._get_plugin_from_router_id(
                context, fip_data['router_id'])
            if router_plugin.plugin_type() != fip_plugin.plugin_type():
                err_msg = (_('Floatingip router should belong to the %s '
                             'plugin as the floatingip') %
                           fip_plugin.plugin_type())
                raise n_exc.InvalidInput(error_message=err_msg)
fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) for router in routers[:]: p = self._get_plugin_from_project(context, router['tenant_id']) if req_p and p != req_p: routers.remove(router) return routers def create_floatingip(self, context, floatingip): net_id = floatingip['floatingip']['floating_network_id'] p = self._get_plugin_from_net_id(context, net_id) self._validate_fip_router_plugin(context, p, floatingip['floatingip']) return p.create_floatingip(context, floatingip) def update_floatingip(self, context, id, floatingip): fip = self._get_floatingip(context, id) net_id = fip['floating_network_id'] p = self._get_plugin_from_net_id(context, net_id) self._validate_fip_router_plugin(context, p, floatingip['floatingip']) return p.update_floatingip(context, id, floatingip) def delete_floatingip(self, context, id): fip = self._get_floatingip(context, id) net_id = fip['floating_network_id'] p = self._get_plugin_from_net_id(context, net_id) return p.delete_floatingip(context, id) def get_floatingip(self, context, id, fields=None): fip = self._get_floatingip(context, id) net_id = fip['floating_network_id'] p = self._get_plugin_from_net_id(context, net_id) return p.get_floatingip(context, id, fields=fields) def get_floatingips(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters) fips = super(NsxTVDPlugin, self).get_floatingips( context, filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) for fip in fips[:]: p = self._get_plugin_from_project(context, fip['tenant_id']) if req_p and p != req_p: fips.remove(fip) return fips def disassociate_floatingips(self, context, port_id): db_port = self._get_port(context, port_id) p = self._get_plugin_from_net_id(context, db_port['network_id']) return p.disassociate_floatingips(context, 
    def _get_plugin_from_sg_id(self, context, sg_id):
        # Resolve the owning plugin via the security group's tenant.
        sg = self._get_security_group(context, sg_id)
        return self._get_plugin_from_project(context, sg['tenant_id'])

    def create_security_group(self, context, security_group,
                              default_sg=False):
        """Create a security group via the requesting project's plugin.

        When this is not the tenant's default SG, the default one is
        ensured first.
        """
        if not default_sg:
            secgroup = security_group['security_group']
            tenant_id = secgroup['tenant_id']
            self._ensure_default_security_group(context, tenant_id)
        # NOTE(review): the plugin is chosen by the *requesting* project
        # (context.project_id), not the SG's tenant_id - confirm this is
        # intended for admin-on-behalf-of requests.
        p = self._get_plugin_from_project(context, context.project_id)
        self._validate_obj_extensions(
            security_group['security_group'], p.plugin_type(),
            'security_group')
        new_sg = p.create_security_group(context, security_group,
                                         default_sg=default_sg)
        self._cleanup_obj_fields(
            new_sg, p.plugin_type(), 'security_group')
        return new_sg

    def delete_security_group(self, context, id):
        """Delete a security group via its owning plugin."""
        p = self._get_plugin_from_sg_id(context, id)
        p.delete_security_group(context, id)

    def update_security_group(self, context, id, security_group):
        """Update a security group via its owning plugin, validating
        plugin-specific extension fields first."""
        p = self._get_plugin_from_sg_id(context, id)
        self._validate_obj_extensions(
            security_group['security_group'], p.plugin_type(),
            'security_group')
        return p.update_security_group(context, id, security_group)

    def get_security_group(self, context, id, fields=None):
        """Return a security group via its owning plugin, with
        unsupported plugin-specific fields stripped."""
        p = self._get_plugin_from_sg_id(context, id)
        sg = p.get_security_group(context, id, fields=fields)
        self._cleanup_obj_fields(
            sg, p.plugin_type(), 'security_group')
        return sg

    def get_security_groups(self, context, filters=None, fields=None,
                            sorts=None, limit=None,
                            marker=None, page_reverse=False,
                            default_sg=False):
        """List security groups, restricted to the requesting plugin
        when the request can be mapped to one."""
        # Read project plugin to filter relevant projects according to
        # plugin
        req_p = self._get_plugin_for_request(context, filters)
        sgs = super(NsxTVDPlugin, self).get_security_groups(
            context, filters=filters, fields=fields, sorts=sorts,
            limit=limit, marker=marker, page_reverse=page_reverse,
            default_sg=default_sg)
        # Iterate over a copy so filtered entries can be removed in place.
        for sg in sgs[:]:
            p = self._get_plugin_from_project(context, sg['tenant_id'])
            if req_p and p != req_p:
                sgs.remove(sg)
        return sgs
    def create_security_group_rule(self, context, security_group_rule):
        """Create an SG rule via the requesting project's plugin."""
        p = self._get_plugin_from_project(context, context.project_id)
        return p.create_security_group_rule(context, security_group_rule)

    def delete_security_group_rule(self, context, id):
        """Delete an SG rule via the plugin owning its security group."""
        rule_db = self._get_security_group_rule(context, id)
        sg_id = rule_db['security_group_id']
        p = self._get_plugin_from_sg_id(context, sg_id)
        p.delete_security_group_rule(context, id)

    def get_security_group_rules(self, context, filters=None, fields=None,
                                 sorts=None, limit=None, marker=None,
                                 page_reverse=False):
        """List SG rules, restricted to the requesting plugin when the
        request can be mapped to one."""
        # Read project plugin to filter relevant projects according to
        # plugin
        req_p = self._get_plugin_for_request(context, filters)
        rules = super(NsxTVDPlugin, self).get_security_group_rules(
            context, filters=filters, fields=fields, sorts=sorts,
            limit=limit, marker=marker, page_reverse=page_reverse)
        # Iterate over a copy so filtered entries can be removed in place.
        for rule in rules[:]:
            p = self._get_plugin_from_project(context, rule['tenant_id'])
            if req_p and p != req_p:
                rules.remove(rule)
        return rules

    @staticmethod
    @resource_extend.extends([net_def.COLLECTION_NAME])
    def _ext_extend_network_dict(result, netdb):
        """Extension hook: let the owning plugin's extension manager
        extend a network dict."""
        ctx = n_context.get_admin_context()
        # get the core plugin as this is a static method with no 'self'
        plugin = directory.get_plugin()
        p = plugin._get_plugin_from_project(ctx, netdb['tenant_id'])
        with db_api.context_manager.writer.using(ctx):
            p._extension_manager.extend_network_dict(
                ctx.session, netdb, result)

    @staticmethod
    @resource_extend.extends([port_def.COLLECTION_NAME])
    def _ext_extend_port_dict(result, portdb):
        """Extension hook: let the owning plugin's extension manager
        extend a port dict."""
        ctx = n_context.get_admin_context()
        # get the core plugin as this is a static method with no 'self'
        plugin = directory.get_plugin()
        p = plugin._get_plugin_from_project(ctx, portdb['tenant_id'])
        with db_api.context_manager.writer.using(ctx):
            p._extension_manager.extend_port_dict(
                ctx.session, portdb, result)
    @staticmethod
    @resource_extend.extends([subnet_def.COLLECTION_NAME])
    def _ext_extend_subnet_dict(result, subnetdb):
        """Extension hook: let the owning plugin's extension manager
        extend a subnet dict."""
        ctx = n_context.get_admin_context()
        # get the core plugin as this is a static method with no 'self'
        plugin = directory.get_plugin()
        p = plugin._get_plugin_from_project(ctx, subnetdb['tenant_id'])
        with db_api.context_manager.writer.using(ctx):
            p._extension_manager.extend_subnet_dict(
                ctx.session, subnetdb, result)

    def _get_project_plugin_dict(self, data):
        # Build the API representation of a project->plugin mapping.
        # 'id' and 'tenant_id' both mirror the project id so that the
        # standard resource plumbing works on these objects.
        return {'id': data['project'],
                'project': data['project'],
                'plugin': data['plugin'],
                'tenant_id': data['project']}
Check that plugin is available if data['plugin'] not in self.plugins: raise projectpluginmap.ProjectPluginNotAvailable( plugin=data['plugin']) # Add the entry to the DB and return it LOG.info("Adding mapping between project %(project)s and plugin " "%(plugin)s", {'project': data['project'], 'plugin': data['plugin']}) nsx_db.add_project_plugin_mapping(context.session, data['project'], data['plugin']) return self._get_project_plugin_dict(data) def get_project_plugin_map(self, context, id, fields=None): data = nsx_db.get_project_plugin_mapping(context.session, id) if data: return self._get_project_plugin_dict(data) else: raise n_exc.ObjectNotFound(id=id) def get_project_plugin_maps(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # TODO(asarfaty) filter the results mappings = nsx_db.get_project_plugin_mappings(context.session) return [self._get_project_plugin_dict(data) for data in mappings] def get_plugin_type_from_project(self, context, project_id): """Get the correct plugin type for this project. Look for the project in the DB. 
If not there - add an entry with the default plugin """ plugin_type = self.default_plugin if not project_id: # if the project_id is empty - return the default one and do not # add to db (used by admin context to get actions) return plugin_type mapping = nsx_db.get_project_plugin_mapping( context.session, project_id) if mapping: plugin_type = mapping['plugin'] else: # add a new entry with the default plugin try: self.create_project_plugin_map( context, {'project_plugin_map': {'plugin': plugin_type, 'project': project_id}}, internal=True) except projectpluginmap.ProjectPluginAlreadyExists: # Maybe added by another thread pass if not self.plugins.get(plugin_type): msg = (_("Cannot use unsupported plugin %(plugin)s for project " "%(project)s") % {'plugin': plugin_type, 'project': project_id}) raise nsx_exc.NsxPluginException(err_msg=msg) LOG.debug("Using %s plugin for project %s", plugin_type, project_id) return plugin_type def _get_plugin_from_project(self, context, project_id): """Get the correct plugin for this project. Look for the project in the DB. 
If not there - add an entry with the default plugin """ plugin_type = self.get_plugin_type_from_project(context, project_id) return self.plugins[plugin_type] def get_housekeeper(self, context, name, fields=None): p = self._get_plugin_from_project(context, context.project_id) if hasattr(p, 'housekeeper'): return p.housekeeper.get(name) msg = _("Housekeeper %s not found") % name raise nsx_exc.NsxPluginException(err_msg=msg) def get_housekeepers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): p = self._get_plugin_for_request(context, filters) if p and hasattr(p, 'housekeeper'): return p.housekeeper.list() return [] def update_housekeeper(self, context, name, housekeeper): p = self._get_plugin_from_project(context, context.project_id) if hasattr(p, 'housekeeper'): p.housekeeper.run(context, name) return p.housekeeper.get(name) def get_address_scopes(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters) address_scopes = super(NsxTVDPlugin, self).get_address_scopes( context, filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) for address_scope in address_scopes[:]: p = self._get_plugin_from_project(context, address_scope['tenant_id']) if req_p and p != req_p: address_scopes.remove(address_scope) return address_scopes def get_subnetpools(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters) pools = super(NsxTVDPlugin, self).get_subnetpools( context, filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) for pool in pools[:]: p = self._get_plugin_from_project(context, pool['tenant_id']) if 
    def get_nsx_policy(self, context, id, fields=None):
        """Return an NSX policy object.

        Extension supported only by the nsxv plugin; any other plugin
        type results in an InvalidInput error.
        """
        p = self._get_plugin_from_project(context, context.project_id)
        if p.plugin_type() != v.NsxVPluginV2.plugin_type():
            err_msg = (_('Can not support %(field)s extension for '
                         '%(p)s plugin') % {
                       'field': 'nsx-policy',
                       'p': p.plugin_type()})
            raise n_exc.InvalidInput(error_message=err_msg)
        return p.get_nsx_policy(context, id, fields=fields)

    def get_nsx_policies(self, context, filters=None, fields=None,
                         sorts=None, limit=None, marker=None,
                         page_reverse=False):
        """List NSX policy objects.

        Extension supported only by the nsxv plugin; other plugin types
        get an empty list (listing is best-effort, unlike get).
        """
        p = self._get_plugin_from_project(context, context.project_id)
        if p.plugin_type() != v.NsxVPluginV2.plugin_type():
            return []
        return p.get_nsx_policies(context, filters=filters, fields=fields,
                                  sorts=sorts, limit=limit, marker=marker,
                                  page_reverse=page_reverse)
import uuid from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import port_security as psec from neutron_lib.api import faults from neutron_lib.api import validators from neutron_lib import constants from neutron_lib import context as q_context from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import allowedaddresspairs as addr_exc from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.exceptions import port_security as psec_exc from oslo_concurrency import lockutils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils import six from sqlalchemy import exc as sql_exc from sqlalchemy.orm import exc as sa_exc import webob.exc from neutron.api import extensions as neutron_extensions from neutron.api.v2 import attributes as attr from neutron.db import _model_query as model_query from neutron.db import _resource_extend as resource_extend from neutron.db import _utils as db_utils from neutron.db import agentschedulers_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import api as db_api from neutron.db import db_base_plugin_v2 from neutron.db import dns_db from neutron.db import external_net_db from neutron.db import extradhcpopt_db from neutron.db import extraroute_db from neutron.db import l3_attrs_db from neutron.db import l3_db from neutron.db import l3_dvr_db from neutron.db import l3_gwmode_db from neutron.db.models import l3 as l3_db_models from neutron.db.models import securitygroup as securitygroup_model # noqa from neutron.db import models_v2 from neutron.db import portbindings_db from neutron.db import portsecurity_db from neutron.db import quota_db # noqa from neutron.db import securitygroups_db from neutron.extensions import providernet from neutron.extensions import securitygroup as 
ext_sg from neutron.plugins.common import utils from neutron.quota import resource_registry from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext from neutron_lib.api.definitions import extraroute as xroute_apidef from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import portbindings as pbin from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.exceptions import extraroute as xroute_exc from neutron_lib.exceptions import multiprovidernet as mpnet_exc import vmware_nsx from vmware_nsx._i18n import _ from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import config # noqa from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import nsx_utils from vmware_nsx.common import securitygroups as sg_utils from vmware_nsx.common import sync from vmware_nsx.common import utils as c_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.db import maclearning as mac_db from vmware_nsx.db import networkgw_db from vmware_nsx.db import nsx_models from vmware_nsx.db import qos_db from vmware_nsx.dhcp_meta import modes as dhcpmeta_modes from vmware_nsx.extensions import maclearning as mac_ext from vmware_nsx.extensions import networkgw from vmware_nsx.extensions import qos_queue as qos from vmware_nsx.nsxlib.mh import l2gateway as l2gwlib from vmware_nsx.nsxlib.mh import queue as queuelib from vmware_nsx.nsxlib.mh import router as routerlib from vmware_nsx.nsxlib.mh import secgroup as secgrouplib from vmware_nsx.nsxlib.mh import switch as switchlib LOG = logging.getLogger(__name__) NSX_NOSNAT_RULES_ORDER = 10 NSX_FLOATINGIP_NAT_RULES_ORDER = 224 NSX_EXTGW_NAT_RULES_ORDER = 255 NSX_DEFAULT_NEXTHOP = '1.1.1.1' class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin, agentschedulers_db.DhcpAgentSchedulerDbMixin, db_base_plugin_v2.NeutronDbPluginV2, dhcpmeta_modes.DhcpMetadataAccess, l3_dvr_db.L3_NAT_with_dvr_db_mixin, 
external_net_db.External_net_db_mixin, extradhcpopt_db.ExtraDhcpOptMixin, extraroute_db.ExtraRoute_db_mixin, l3_gwmode_db.L3_NAT_db_mixin, mac_db.MacLearningDbMixin, networkgw_db.NetworkGatewayMixin, portbindings_db.PortBindingMixin, portsecurity_db.PortSecurityDbMixin, qos_db.QoSDbMixin, securitygroups_db.SecurityGroupDbMixin, dns_db.DNSDbMixin): supported_extension_aliases = ["allowed-address-pairs", "binding", "dvr", "ext-gw-mode", xroute_apidef.ALIAS, "mac-learning", "multi-provider", "network-gateway", "port-security", "provider", "qos-queue", "quotas", "external-net", "extra_dhcp_opt", "router", "security-group", constants.SUBNET_ALLOCATION_EXT_ALIAS] __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True # Map nova zones to cluster for easy retrieval novazone_cluster_map = {} @resource_registry.tracked_resources( network=models_v2.Network, port=models_v2.Port, subnet=models_v2.Subnet, subnetpool=models_v2.SubnetPool, security_group=securitygroup_model.SecurityGroup, security_group_rule=securitygroup_model.SecurityGroupRule, router=l3_db_models.Router, floatingip=l3_db_models.FloatingIP) def __init__(self): LOG.warning("The NSX-MH plugin is deprecated and may be removed " "in the O or the P cycle") super(NsxPluginV2, self).__init__() # TODO(salv-orlando): Replace These dicts with # collections.defaultdict for better handling of default values # Routines for managing logical ports in NSX self.port_special_owners = [l3_db.DEVICE_OWNER_ROUTER_GW, l3_db.DEVICE_OWNER_ROUTER_INTF] self._port_drivers = { 'create': {constants.DEVICE_OWNER_ROUTER_GW: self._nsx_create_ext_gw_port, constants.DEVICE_OWNER_FLOATINGIP: self._nsx_create_fip_port, constants.DEVICE_OWNER_ROUTER_INTF: self._nsx_create_router_port, constants.DEVICE_OWNER_DVR_INTERFACE: self._nsx_create_router_port, networkgw_db.DEVICE_OWNER_NET_GW_INTF: self._nsx_create_l2_gw_port, 'default': self._nsx_create_port}, 'delete': {constants.DEVICE_OWNER_ROUTER_GW: 
self._nsx_delete_ext_gw_port, constants.DEVICE_OWNER_ROUTER_INTF: self._nsx_delete_router_port, constants.DEVICE_OWNER_DVR_INTERFACE: self._nsx_delete_router_port, constants.DEVICE_OWNER_FLOATINGIP: self._nsx_delete_fip_port, networkgw_db.DEVICE_OWNER_NET_GW_INTF: self._nsx_delete_port, 'default': self._nsx_delete_port} } neutron_extensions.append_api_extensions_path( [vmware_nsx.NSX_EXT_PATH]) self.cfg_group = 'NSX' # group name for nsx section in nsx.ini self.nsx_opts = cfg.CONF.NSX self.nsx_sync_opts = cfg.CONF.NSX_SYNC self.cluster = nsx_utils.create_nsx_cluster( cfg.CONF, self.nsx_opts.concurrent_connections, self.nsx_opts.nsx_gen_timeout) self.base_binding_dict = { pbin.VIF_TYPE: pbin.VIF_TYPE_OVS, pbin.VIF_DETAILS: { # TODO(rkukura): Replace with new VIF security details pbin.CAP_PORT_FILTER: 'security-group' in self.supported_extension_aliases}} self._extend_fault_map() self.setup_dhcpmeta_access() # Set this flag to false as the default gateway has not # been yet updated from the config file self._is_default_net_gw_in_sync = False # Create a synchronizer instance for backend sync self._synchronizer = sync.NsxSynchronizer( self.safe_reference, self.cluster, self.nsx_sync_opts.state_sync_interval, self.nsx_sync_opts.min_sync_req_delay, self.nsx_sync_opts.min_chunk_size, self.nsx_sync_opts.max_random_sync_delay) def _ensure_default_network_gateway(self): if self._is_default_net_gw_in_sync: return # Add the gw in the db as default, and unset any previous default def_l2_gw_uuid = self.cluster.default_l2_gw_service_uuid try: ctx = q_context.get_admin_context() self._unset_default_network_gateways(ctx) if not def_l2_gw_uuid: return try: def_network_gw = self._get_network_gateway(ctx, def_l2_gw_uuid) except networkgw_db.GatewayNotFound: # Create in DB only - don't go to backend def_gw_data = {'id': def_l2_gw_uuid, 'name': 'default L2 gateway service', 'devices': [], 'tenant_id': ctx.tenant_id} gw_res_name = networkgw.GATEWAY_RESOURCE_NAME.replace('-', '_') 
def_network_gw = super( NsxPluginV2, self).create_network_gateway( ctx, {gw_res_name: def_gw_data}) # In any case set is as default self._set_default_network_gateway(ctx, def_network_gw['id']) # Ensure this method is executed only once self._is_default_net_gw_in_sync = True except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Unable to process default l2 gw service: " "%s", def_l2_gw_uuid) def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None): """Build ip_addresses data structure for logical router port. No need to perform validation on IPs - this has already been done in the l3_db mixin class. """ ip_addresses = [] for ip in fixed_ips: if not subnet_ids or (ip['subnet_id'] in subnet_ids): subnet = self._get_subnet(context, ip['subnet_id']) ip_prefix = '%s/%s' % (ip['ip_address'], subnet['cidr'].split('/')[1]) ip_addresses.append(ip_prefix) return ip_addresses def _create_and_attach_router_port(self, cluster, context, nsx_router_id, port_data, attachment_type, attachment, attachment_vlan=None, subnet_ids=None): # Use a fake IP address if gateway port is not 'real' ip_addresses = (port_data.get('fake_ext_gw') and ['0.0.0.0/31'] or self._build_ip_address_list(context, port_data['fixed_ips'], subnet_ids)) try: lrouter_port = routerlib.create_router_lport( cluster, nsx_router_id, port_data.get('tenant_id', 'fake'), port_data.get('id', 'fake'), port_data.get('name', 'fake'), port_data.get('admin_state_up', True), ip_addresses, port_data.get('mac_address')) LOG.debug("Created NSX router port:%s", lrouter_port['uuid']) except api_exc.NsxApiException: LOG.exception("Unable to create port on NSX logical router " "%s", nsx_router_id) raise nsx_exc.NsxPluginException( err_msg=_("Unable to create logical router port for neutron " "port id %(port_id)s on router %(nsx_router_id)s") % {'port_id': port_data.get('id'), 'nsx_router_id': nsx_router_id}) self._update_router_port_attachment(cluster, context, nsx_router_id, port_data, 
    def _update_router_gw_info(self, context, router_id, info):
        """Update a router's external gateway and reconcile SNAT rules.

        SNAT rules on the NSX backend are removed and/or re-created
        depending on whether the external network changed and whether
        SNAT was toggled.
        """
        # NOTE(salvatore-orlando): We need to worry about rollback of NSX
        # configuration in case of failures in the process
        # Ref. LP bug 1102301
        router = self._get_router(context, router_id)
        # Check whether SNAT rule update should be triggered
        # NSX also supports multiple external networks so there is also
        # the possibility that NAT rules should be replaced
        current_ext_net_id = router.gw_port_id and router.gw_port.network_id
        new_ext_net_id = info and info.get('network_id')
        # SNAT should be enabled unless info['enable_snat'] is
        # explicitly set to false
        enable_snat = new_ext_net_id and info.get('enable_snat', True)
        # Remove if ext net removed, changed, or if snat disabled
        remove_snat_rules = (current_ext_net_id and
                             new_ext_net_id != current_ext_net_id or
                             router.enable_snat and not enable_snat)
        # Add rules if snat is enabled, and if either the external network
        # changed or snat was previously disabled
        # NOTE: enable_snat == True implies new_ext_net_id != None
        add_snat_rules = (enable_snat and
                          (new_ext_net_id != current_ext_net_id or
                           not router.enable_snat))
        router = super(NsxPluginV2, self)._update_router_gw_info(
            context, router_id, info, router=router)
        # Add/Remove SNAT rules as needed
        # Create an elevated context for dealing with metadata access
        # cidrs which are created within admin context
        ctx_elevated = context.elevated()
        if remove_snat_rules or add_snat_rules:
            cidrs = self._find_router_subnets_cidrs(ctx_elevated, router_id)
            nsx_router_id = nsx_utils.get_nsx_router_id(
                context.session, self.cluster, router_id)
        if remove_snat_rules:
            # Be safe and concede NAT rules might not exist.
            # Therefore, use min_num_expected=0
            for cidr in cidrs:
                routerlib.delete_nat_rules_by_match(
                    self.cluster, nsx_router_id, "SourceNatRule",
                    max_num_expected=1, min_num_expected=0,
                    raise_on_len_mismatch=False,
                    source_ip_addresses=cidr)
        if add_snat_rules:
            ip_addresses = self._build_ip_address_list(
                ctx_elevated, router.gw_port['fixed_ips'])
            # Set the SNAT rule for each subnet (only first IP)
            for cidr in cidrs:
                cidr_prefix = int(cidr.split('/')[1])
                routerlib.create_lrouter_snat_rule(
                    self.cluster, nsx_router_id,
                    ip_addresses[0].split('/')[0],
                    ip_addresses[0].split('/')[0],
                    order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix,
                    match_criteria={'source_ip_addresses': cidr})

    def _update_router_port_attachment(self, cluster, context,
                                       nsx_router_id, port_data,
                                       nsx_router_port_id,
                                       attachment_type,
                                       attachment,
                                       attachment_vlan=None):
        """Plug an attachment into an NSX logical router port.

        On backend failure the NSX logical port is deleted (rollback)
        and an NsxPluginException is raised.
        """
        if not nsx_router_port_id:
            nsx_router_port_id = self._find_router_gw_port(context,
                                                           port_data)
        try:
            routerlib.plug_router_port_attachment(cluster, nsx_router_id,
                                                  nsx_router_port_id,
                                                  attachment,
                                                  attachment_type,
                                                  attachment_vlan)
            LOG.debug("Attached %(att)s to NSX router port %(port)s",
                      {'att': attachment, 'port': nsx_router_port_id})
        except api_exc.NsxApiException:
            # Must remove NSX logical port
            routerlib.delete_router_lport(cluster, nsx_router_id,
                                          nsx_router_port_id)
            LOG.exception("Unable to plug attachment in NSX logical "
                          "router port %(r_port_id)s, associated with "
                          "Neutron %(q_port_id)s",
                          {'r_port_id': nsx_router_port_id,
                           'q_port_id': port_data.get('id')})
            raise nsx_exc.NsxPluginException(
                err_msg=(_("Unable to plug attachment in router port "
                           "%(r_port_id)s for neutron port id %(q_port_id)s "
                           "on router %(router_id)s") %
                         {'r_port_id': nsx_router_port_id,
                          'q_port_id': port_data.get('id'),
                          'router_id': nsx_router_id}))
""" port_qry = context.session.query(models_v2.Port) return port_qry.filter_by( device_id=device_id, device_owner=device_owner,).all() def _find_router_subnets_cidrs(self, context, router_id): """Retrieve subnets attached to the specified router.""" ports = self._get_port_by_device_id(context, router_id, l3_db.DEVICE_OWNER_ROUTER_INTF) # No need to check for overlapping CIDRs cidrs = [] for port in ports: for ip in port.get('fixed_ips', []): cidrs.append(self._get_subnet(context, ip.subnet_id).cidr) return cidrs def _nsx_find_lswitch_for_port(self, context, port_data): network = self._get_network(context, port_data['network_id']) return self._handle_lswitch_selection( context, self.cluster, network) def _nsx_create_port_helper(self, session, ls_uuid, port_data, do_port_security=True): # Convert Neutron security groups identifiers into NSX security # profiles identifiers nsx_sec_profile_ids = [ nsx_utils.get_nsx_security_group_id( session, self.cluster, neutron_sg_id) for neutron_sg_id in (port_data[ext_sg.SECURITYGROUPS] or [])] return switchlib.create_lport(self.cluster, ls_uuid, port_data['tenant_id'], port_data['id'], port_data['name'], port_data['device_id'], port_data['admin_state_up'], port_data['mac_address'], port_data['fixed_ips'], port_data[psec.PORTSECURITY], nsx_sec_profile_ids, port_data.get(qos.QUEUE), port_data.get(mac_ext.MAC_LEARNING), port_data.get(addr_apidef.ADDRESS_PAIRS)) def _handle_create_port_exception(self, context, port_id, ls_uuid, lp_uuid): with excutils.save_and_reraise_exception(): # rollback nsx logical port only if it was successfully # created on NSX. Should this command fail the original # exception will be raised. 
if lp_uuid: # Remove orphaned port from NSX switchlib.delete_port(self.cluster, ls_uuid, lp_uuid) # rollback the neutron-nsx port mapping nsx_db.delete_neutron_nsx_port_mapping(context.session, port_id) LOG.exception("An exception occurred while creating the " "neutron port %s on the NSX plaform", port_id) def _nsx_create_port(self, context, port_data): """Driver for creating a logical switch port on NSX platform.""" # FIXME(salvatore-orlando): On the NSX platform we do not really have # external networks. So if as user tries and create a "regular" VIF # port on an external network we are unable to actually create. # However, in order to not break unit tests, we need to still create # the DB object and return success if self._network_is_external(context, port_data['network_id']): LOG.info("NSX plugin does not support regular VIF ports on " "external networks. Port %s will be down.", port_data['network_id']) # No need to actually update the DB state - the default is down return port_data lport = None selected_lswitch = None try: selected_lswitch = self._nsx_find_lswitch_for_port(context, port_data) lport = self._nsx_create_port_helper(context.session, selected_lswitch['uuid'], port_data, True) nsx_db.add_neutron_nsx_port_mapping( context.session, port_data['id'], selected_lswitch['uuid'], lport['uuid']) if port_data['device_owner'] not in self.port_special_owners: switchlib.plug_vif_interface( self.cluster, selected_lswitch['uuid'], lport['uuid'], "VifAttachment", port_data['id']) LOG.debug("_nsx_create_port completed for port %(name)s " "on network %(network_id)s. 
The new port id is " "%(id)s.", port_data) except (api_exc.NsxApiException, n_exc.NeutronException): self._handle_create_port_exception( context, port_data['id'], selected_lswitch and selected_lswitch['uuid'], lport and lport['uuid']) except db_exc.DBError as e: if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and isinstance(e.inner_exception, sql_exc.IntegrityError)): LOG.warning( "Concurrent network deletion detected; Back-end " "Port %(nsx_id)s creation to be rolled back for " "Neutron port: %(neutron_id)s", {'nsx_id': lport['uuid'], 'neutron_id': port_data['id']}) if selected_lswitch and lport: try: switchlib.delete_port(self.cluster, selected_lswitch['uuid'], lport['uuid']) except n_exc.NotFound: LOG.debug("NSX Port %s already gone", lport['uuid']) def _nsx_delete_port(self, context, port_data): # FIXME(salvatore-orlando): On the NSX platform we do not really have # external networks. So deleting regular ports from external networks # does not make sense. However we cannot raise as this would break # unit tests. if self._network_is_external(context, port_data['network_id']): LOG.info("NSX plugin does not support regular VIF ports on " "external networks. Port %s will be down.", port_data['network_id']) return nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( context.session, self.cluster, port_data['id']) if not nsx_port_id: LOG.debug("Port '%s' was already deleted on NSX platform", id) return # TODO(bgh): if this is a bridged network and the lswitch we just got # back will have zero ports after the delete we should garbage collect # the lswitch. 
        try:
            switchlib.delete_port(self.cluster, nsx_switch_id, nsx_port_id)
            LOG.debug("_nsx_delete_port completed for port %(port_id)s "
                      "on network %(net_id)s",
                      {'port_id': port_data['id'],
                       'net_id': port_data['network_id']})
        except n_exc.NotFound:
            # Port already gone on the backend: nothing left to delete
            LOG.warning("Port %s not found in NSX", port_data['id'])

    def _nsx_delete_router_port(self, context, port_data):
        """Driver for deleting a router interface port.

        Removes the peer NSX logical router port, then the logical
        switch port itself.
        """
        # Delete logical router port
        nsx_router_id = nsx_utils.get_nsx_router_id(
            context.session, self.cluster, port_data['device_id'])
        nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
            context.session, self.cluster, port_data['id'])
        if not nsx_port_id:
            LOG.warning(
                "Neutron port %(port_id)s not found on NSX backend. "
                "Terminating delete operation. A dangling router port "
                "might have been left on router %(router_id)s",
                {'port_id': port_data['id'],
                 'router_id': nsx_router_id})
            return
        try:
            routerlib.delete_peer_router_lport(self.cluster,
                                               nsx_router_id,
                                               nsx_switch_id,
                                               nsx_port_id)
        except api_exc.NsxApiException:
            # Do not raise because the issue might as well be that the
            # router has already been deleted, so there would be nothing
            # to do here
            LOG.exception("Ignoring exception as this means the peer "
                          "for port '%s' has already been deleted.",
                          nsx_port_id)
        # Delete logical switch port
        self._nsx_delete_port(context, port_data)

    def _nsx_create_router_port(self, context, port_data):
        """Driver for creating a switch port to be connected to a router."""
        # No router ports on external networks!
        if self._network_is_external(context, port_data['network_id']):
            raise nsx_exc.NsxPluginException(
                err_msg=(_("It is not allowed to create router interface "
                           "ports on external networks as '%s'") %
                         port_data['network_id']))
        ls_port = None
        selected_lswitch = None
        try:
            selected_lswitch = self._nsx_find_lswitch_for_port(
                context, port_data)
            # Do not apply port security here!
            ls_port = self._nsx_create_port_helper(
                context.session, selected_lswitch['uuid'],
                port_data, False)
            # Assuming subnet being attached is on first fixed ip
            # element in port data
            subnet_id = None
            if len(port_data['fixed_ips']):
                subnet_id = port_data['fixed_ips'][0]['subnet_id']
            nsx_router_id = nsx_utils.get_nsx_router_id(
                context.session, self.cluster, port_data['device_id'])
            # Create peer port on logical router
            self._create_and_attach_router_port(
                self.cluster, context, nsx_router_id, port_data,
                "PatchAttachment", ls_port['uuid'],
                subnet_ids=[subnet_id])
            nsx_db.add_neutron_nsx_port_mapping(
                context.session, port_data['id'],
                selected_lswitch['uuid'], ls_port['uuid'])
            LOG.debug("_nsx_create_router_port completed for port "
                      "%(name)s on network %(network_id)s. The new "
                      "port id is %(id)s.", port_data)
        except (api_exc.NsxApiException, n_exc.NeutronException):
            # Roll back NSX resources and re-raise
            self._handle_create_port_exception(
                context, port_data['id'],
                selected_lswitch and selected_lswitch['uuid'],
                ls_port and ls_port['uuid'])

    def _find_router_gw_port(self, context, port_data):
        """Return the NSX gateway port for the router owning port_data."""
        router_id = port_data['device_id']
        if not router_id:
            raise n_exc.BadRequest(_("device_id field must be populated in "
                                     "order to create an external gateway "
                                     "port for network %s"),
                                   port_data['network_id'])
        nsx_router_id = nsx_utils.get_nsx_router_id(
            context.session, self.cluster, router_id)
        lr_port = routerlib.find_router_gw_port(context, self.cluster,
                                                nsx_router_id)
        if not lr_port:
            raise nsx_exc.NsxPluginException(
                err_msg=(_("The gateway port for the NSX router %s "
                           "was not found on the backend") %
                         nsx_router_id))
        return lr_port

    @lockutils.synchronized('vmware', 'neutron-')
    def _nsx_create_ext_gw_port(self, context, port_data):
        """Driver for creating an external gateway port on NSX platform."""
        # TODO(salvatore-orlando): Handle NSX resource
        # rollback when something goes not quite as expected
        lr_port = self._find_router_gw_port(context, port_data)
        ip_addresses = self._build_ip_address_list(context,
                                                   port_data['fixed_ips'])
# This operation actually always updates a NSX logical port # instead of creating one. This is because the gateway port # is created at the same time as the NSX logical router, otherwise # the fabric status of the NSX router will be down. # admin_status should always be up for the gateway port # regardless of what the user specifies in neutron nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, port_data['device_id']) routerlib.update_router_lport(self.cluster, nsx_router_id, lr_port['uuid'], port_data['tenant_id'], port_data['id'], port_data['name'], True, ip_addresses) ext_network = self.get_network(context, port_data['network_id']) if ext_network.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.L3_EXT: # Update attachment physical_network = (ext_network[pnet.PHYSICAL_NETWORK] or self.cluster.default_l3_gw_service_uuid) self._update_router_port_attachment( self.cluster, context, nsx_router_id, port_data, lr_port['uuid'], "L3GatewayAttachment", physical_network, ext_network[pnet.SEGMENTATION_ID]) LOG.debug("_nsx_create_ext_gw_port completed on external network " "%(ext_net_id)s, attached to router:%(router_id)s. " "NSX port id is %(nsx_port_id)s", {'ext_net_id': port_data['network_id'], 'router_id': nsx_router_id, 'nsx_port_id': lr_port['uuid']}) @lockutils.synchronized('vmware', 'neutron-') def _nsx_delete_ext_gw_port(self, context, port_data): # TODO(salvatore-orlando): Handle NSX resource # rollback when something goes not quite as expected try: router_id = port_data['device_id'] nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) if not nsx_router_id: LOG.debug("No object found on backend for router %s. 
This " "that the router was already deleted and no " "further action is needed for resetting the " "external gateway port", router_id) return lr_port = self._find_router_gw_port(context, port_data) # Delete is actually never a real delete, otherwise the NSX # logical router will stop working routerlib.update_router_lport(self.cluster, nsx_router_id, lr_port['uuid'], port_data['tenant_id'], port_data['id'], port_data['name'], True, ['0.0.0.0/31']) # Reset attachment self._update_router_port_attachment( self.cluster, context, nsx_router_id, port_data, lr_port['uuid'], "L3GatewayAttachment", self.cluster.default_l3_gw_service_uuid) LOG.debug("_nsx_delete_ext_gw_port completed on external network " "%(ext_net_id)s, attached to NSX router:%(router_id)s", {'ext_net_id': port_data['network_id'], 'router_id': nsx_router_id}) except n_exc.NotFound: LOG.debug("Logical router resource %s not found " "on NSX platform : the router may have " "already been deleted", port_data['device_id']) except api_exc.NsxApiException: raise nsx_exc.NsxPluginException( err_msg=_("Unable to update logical router" "on NSX Platform")) def _nsx_create_l2_gw_port(self, context, port_data): """Create a switch port, and attach it to a L2 gateway attachment.""" # FIXME(salvatore-orlando): On the NSX platform we do not really have # external networks. So if as user tries and create a "regular" VIF # port on an external network we are unable to actually create. # However, in order to not break unit tests, we need to still create # the DB object and return success if self._network_is_external(context, port_data['network_id']): LOG.info("NSX plugin does not support regular VIF ports on " "external networks. 
            # No need to actually update the DB state - the default is down
            return port_data
        lport = None
        try:
            selected_lswitch = self._nsx_find_lswitch_for_port(
                context, port_data)
            lport = self._nsx_create_port_helper(
                context.session,
                selected_lswitch['uuid'],
                port_data,
                True)
            nsx_db.add_neutron_nsx_port_mapping(
                context.session, port_data['id'],
                selected_lswitch['uuid'], lport['uuid'])
            # Attach the new logical port to the L2 gateway service
            # identified by the port's device_id
            l2gwlib.plug_l2_gw_service(
                self.cluster,
                selected_lswitch['uuid'],
                lport['uuid'],
                port_data['device_id'],
                int(port_data.get('gw:segmentation_id') or 0))
        except Exception:
            with excutils.save_and_reraise_exception():
                # Remove the backend port only if it was created
                if lport:
                    switchlib.delete_port(self.cluster,
                                          selected_lswitch['uuid'],
                                          lport['uuid'])
        LOG.debug("_nsx_create_l2_gw_port completed for port %(name)s "
                  "on network %(network_id)s. The new port id "
                  "is %(id)s.", port_data)

    def _nsx_create_fip_port(self, context, port_data):
        # As we do not create ports for floating IPs in NSX,
        # this is a no-op driver
        pass

    def _nsx_delete_fip_port(self, context, port_data):
        # As we do not create ports for floating IPs in NSX,
        # this is a no-op driver
        pass

    def _extend_fault_map(self):
        """Extends the Neutron Fault Map.

        Exceptions specific to the NSX Plugin are mapped to standard
        HTTP Exceptions.
        """
        faults.FAULT_MAP.update({nsx_exc.InvalidNovaZone:
                                 webob.exc.HTTPBadRequest,
                                 nsx_exc.NoMorePortsException:
                                 webob.exc.HTTPBadRequest,
                                 nsx_exc.MaintenanceInProgress:
                                 webob.exc.HTTPServiceUnavailable,
                                 nsx_exc.InvalidSecurityCertificate:
                                 webob.exc.HTTPBadRequest})

    def _validate_provider_create(self, context, network):
        """Validate the provider/multiprovider attributes of a network.

        Raises InvalidInput or VlanIdInUse when a segment is invalid.
        """
        segments = network.get(mpnet_apidef.SEGMENTS)
        if not validators.is_attr_set(segments):
            return

        mpnet_apidef.check_duplicate_segments(segments)
        for segment in segments:
            network_type = segment.get(pnet.NETWORK_TYPE)
            physical_network = segment.get(pnet.PHYSICAL_NETWORK)
            physical_network_set = validators.is_attr_set(physical_network)
            segmentation_id = segment.get(pnet.SEGMENTATION_ID)
            network_type_set = validators.is_attr_set(network_type)
            segmentation_id_set = validators.is_attr_set(segmentation_id)

            # If the physical_network_uuid isn't passed in use the default one.
            if not physical_network_set:
                physical_network = cfg.CONF.default_tz_uuid

            err_msg = None
            if not network_type_set:
                err_msg = _("%s required") % pnet.NETWORK_TYPE
            elif network_type in (c_utils.NetworkTypes.GRE,
                                  c_utils.NetworkTypes.STT,
                                  c_utils.NetworkTypes.FLAT):
                if segmentation_id_set:
                    err_msg = _("Segmentation ID cannot be specified with "
                                "flat network type")
            elif network_type == c_utils.NetworkTypes.VLAN:
                if not segmentation_id_set:
                    err_msg = _("Segmentation ID must be specified with "
                                "vlan network type")
                elif (segmentation_id_set and
                      not utils.is_valid_vlan_tag(segmentation_id)):
                    err_msg = (_("%(segmentation_id)s out of range "
                                 "(%(min_id)s through %(max_id)s)") %
                               {'segmentation_id': segmentation_id,
                                'min_id': constants.MIN_VLAN_TAG,
                                'max_id': constants.MAX_VLAN_TAG})
                else:
                    # Verify segment is not already allocated
                    bindings = (
                        nsx_db.get_network_bindings_by_vlanid_and_physical_net(
                            context.session, segmentation_id,
                            physical_network)
                    )
                    if bindings:
                        raise n_exc.VlanIdInUse(
                            vlan_id=segmentation_id,
                            physical_network=physical_network)
            elif network_type == c_utils.NetworkTypes.L3_EXT:
                if (segmentation_id_set and
                    not utils.is_valid_vlan_tag(segmentation_id)):
                    err_msg = (_("%(segmentation_id)s out of range "
                                 "(%(min_id)s through %(max_id)s)") %
                               {'segmentation_id': segmentation_id,
                                'min_id': constants.MIN_VLAN_TAG,
                                'max_id': constants.MAX_VLAN_TAG})
                # Network must be external
                if not network.get(extnet_apidef.EXTERNAL):
                    err_msg = (_("The l3_ext provide network type can be "
                                 "used with external networks only"))
            else:
                err_msg = (_("%(net_type_param)s %(net_type_value)s not "
                             "supported") %
                           {'net_type_param': pnet.NETWORK_TYPE,
                            'net_type_value': network_type})
            if err_msg:
                raise n_exc.InvalidInput(error_message=err_msg)
            # TODO(salvatore-orlando): Validate tranport zone uuid
            # which should be specified in physical_network

    def _extend_network_dict_provider(self, context, network,
                                      multiprovider=None, bindings=None):
        """Populate provider/multiprovider fields on a network dict."""
        if not bindings:
            bindings = nsx_db.get_network_bindings(context.session,
                                                   network['id'])
        if not multiprovider:
            multiprovider = nsx_db.is_multiprovider_network(context.session,
                                                            network['id'])
        # With NSX plugin 'normal' overlay networks will have no binding
        # TODO(salvatore-orlando) make sure users can specify a distinct
        # phy_uuid as 'provider network' for STT net type
        if bindings:
            if not multiprovider:
                # network came in through provider networks api
                network[pnet.NETWORK_TYPE] = bindings[0].binding_type
                network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid
                network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id
            else:
                # network come in though multiprovider networks api
                network[mpnet_apidef.SEGMENTS] = [
                    {pnet.NETWORK_TYPE: binding.binding_type,
                     pnet.PHYSICAL_NETWORK: binding.phy_uuid,
                     pnet.SEGMENTATION_ID: binding.vlan_id}
                    for binding in bindings]

    def extend_port_dict_binding(self, port_res, port_db):
        # All ports handled by this plugin expose the 'normal' VNIC type
        super(NsxPluginV2, self).extend_port_dict_binding(port_res, port_db)
        port_res[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL

    def _handle_lswitch_selection(self, context, cluster, network):
        # NOTE(salv-orlando): This method used to select a NSX logical switch
        # with an available port, and create a new logical switch if
        # necessary. As there is no more need to perform switch chaining in
        # NSX, the logic for creating a new logical switch has been removed.
        max_ports = self.nsx_opts.max_lp_per_overlay_ls
        network_bindings = nsx_db.get_network_bindings(
            context.session, network['id'])
        for network_binding in network_bindings:
            if network_binding.binding_type in (c_utils.NetworkTypes.FLAT,
                                                c_utils.NetworkTypes.VLAN):
                max_ports = self.nsx_opts.max_lp_per_bridged_ls
        # This is still necessary as there could be chained switches in
        # the deployment and the code needs to find the first one with
        # an available slot for a port
        lswitches = nsx_utils.fetch_nsx_switches(
            context.session, cluster, network['id'])
        try:
            return [ls for ls in lswitches
                    if (ls['_relations']['LogicalSwitchStatus']
                        ['lport_count'] < max_ports)].pop(0)
        except IndexError:
            # Too bad, no switch where a port can be created
            LOG.debug("No switch has available ports (%d checked)",
                      len(lswitches))
            raise nsx_exc.NoMorePortsException(network=network.id)

    def _convert_to_nsx_transport_zones(self, cluster, network=None,
                                        bindings=None):
        # TODO(salv-orlando): Remove this method and call nsx-utils direct
        return nsx_utils.convert_to_nsx_transport_zones(
            cluster.default_tz_uuid, network, bindings,
            default_transport_type=cfg.CONF.NSX.default_transport_type)

    def _convert_to_transport_zones_dict(self, network):
        """Converts the provider request body to multiprovider.

        Returns: True if request is multiprovider False if provider
        and None if neither.
        """
""" if any(validators.is_attr_set(network.get(f)) for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)): if validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)): raise mpnet_exc.SegmentsSetInConjunctionWithProviders() # convert to transport zone list network[mpnet_apidef.SEGMENTS] = [ {pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE], pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK], pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}] del network[pnet.NETWORK_TYPE] del network[pnet.PHYSICAL_NETWORK] del network[pnet.SEGMENTATION_ID] return False if validators.is_attr_set(mpnet_apidef.SEGMENTS): return True def create_network(self, context, network): net_data = network['network'] tenant_id = net_data['tenant_id'] self._ensure_default_security_group(context, tenant_id) # Process the provider network extension provider_type = self._convert_to_transport_zones_dict(net_data) self._validate_provider_create(context, net_data) # Replace ATTR_NOT_SPECIFIED with None before sending to NSX for key, value in six.iteritems(network['network']): if value is constants.ATTR_NOT_SPECIFIED: net_data[key] = None # FIXME(arosen) implement admin_state_up = False in NSX if net_data['admin_state_up'] is False: LOG.warning("Network with admin_state_up=False are not yet " "supported by this plugin. Ignoring setting for " "network %s", net_data.get('name', '')) transport_zone_config = self._convert_to_nsx_transport_zones( self.cluster, net_data) external = net_data.get(extnet_apidef.EXTERNAL) # NOTE(salv-orlando): Pre-generating uuid for Neutron # network. 
This will be removed once the network create operation # becomes an asynchronous task net_data['id'] = str(uuid.uuid4()) if (not validators.is_attr_set(external) or validators.is_attr_set(external) and not external): lswitch = switchlib.create_lswitch( self.cluster, net_data['id'], tenant_id, net_data.get('name'), transport_zone_config, shared=net_data.get(attr.SHARED)) with db_api.context_manager.writer.using(context): new_net = super(NsxPluginV2, self).create_network(context, network) # Process port security extension self._process_network_port_security_create( context, net_data, new_net) # DB Operations for setting the network as external self._process_l3_create(context, new_net, net_data) # Process QoS queue extension net_queue_id = net_data.get(qos.QUEUE) if net_queue_id: # Raises if not found self.get_qos_queue(context, net_queue_id) self._process_network_queue_mapping( context, new_net, net_queue_id) # Add mapping between neutron network and NSX switch if (not validators.is_attr_set(external) or validators.is_attr_set(external) and not external): nsx_db.add_neutron_nsx_network_mapping( context.session, new_net['id'], lswitch['uuid']) if (net_data.get(mpnet_apidef.SEGMENTS) and isinstance(provider_type, bool)): net_bindings = [] for tz in net_data[mpnet_apidef.SEGMENTS]: segmentation_id = tz.get(pnet.SEGMENTATION_ID, 0) segmentation_id_set = validators.is_attr_set( segmentation_id) if not segmentation_id_set: segmentation_id = 0 net_bindings.append(nsx_db.add_network_binding( context.session, new_net['id'], tz.get(pnet.NETWORK_TYPE), tz.get(pnet.PHYSICAL_NETWORK), segmentation_id)) if provider_type: nsx_db.set_multiprovider_network(context.session, new_net['id']) self._extend_network_dict_provider(context, new_net, provider_type, net_bindings) # this extra lookup is necessary to get the # latest db model for the extension functions net_model = self._get_network(context, new_net['id']) resource_extend.apply_funcs('networks', new_net, net_model) 
self.handle_network_dhcp_access(context, new_net, action='create_network') return new_net def delete_network(self, context, id): external = self._network_is_external(context, id) # Before removing entry from Neutron DB, retrieve NSX switch # identifiers for removing them from backend if not external: lswitch_ids = nsx_utils.get_nsx_switch_ids( context.session, self.cluster, id) self._process_l3_delete(context, id) nsx_db.delete_network_bindings(context.session, id) super(NsxPluginV2, self).delete_network(context, id) # Do not go to NSX for external networks if not external: try: switchlib.delete_networks(self.cluster, id, lswitch_ids) except n_exc.NotFound: LOG.warning("The following logical switches were not " "found on the NSX backend:%s", lswitch_ids) self.handle_network_dhcp_access(context, id, action='delete_network') LOG.debug("Delete network complete for network: %s", id) def get_network(self, context, id, fields=None): with db_api.context_manager.writer.using(context): # goto to the plugin DB and fetch the network network = self._get_network(context, id) if (self.nsx_sync_opts.always_read_status or fields and 'status' in fields): # External networks are not backed by nsx lswitches if not network.external: # Perform explicit state synchronization self._synchronizer.synchronize_network(context, network) # Don't do field selection here otherwise we won't be able # to add provider networks fields net_result = self._make_network_dict(network, context=context) self._extend_network_dict_provider(context, net_result) return db_utils.resource_fields(net_result, fields) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} with db_api.context_manager.reader.using(context): networks = ( super(NsxPluginV2, self).get_networks( context, filters, fields, sorts, limit, marker, page_reverse)) for net in networks: self._extend_network_dict_provider(context, net) return (networks if not 
fields else [db_utils.resource_fields(network, fields) for network in networks]) def update_network(self, context, id, network): providernet._raise_if_updates_provider_attributes(network['network']) if network["network"].get("admin_state_up") is False: raise NotImplementedError(_("admin_state_up=False networks " "are not supported.")) with db_api.context_manager.writer.using(context): net = super(NsxPluginV2, self).update_network(context, id, network) if psec.PORTSECURITY in network['network']: self._process_network_port_security_update( context, network['network'], net) net_queue_id = network['network'].get(qos.QUEUE) if net_queue_id: self._delete_network_queue_mapping(context, id) self._process_network_queue_mapping(context, net, net_queue_id) self._process_l3_update(context, net, network['network']) self._extend_network_dict_provider(context, net) # If provided, update port name on backend; treat backend failures as # not critical (log error, but do not raise) if 'name' in network['network']: # in case of chained switches update name only for the first one nsx_switch_ids = nsx_utils.get_nsx_switch_ids( context.session, self.cluster, id) if not nsx_switch_ids or len(nsx_switch_ids) < 1: LOG.warning("Unable to find NSX mappings for neutron " "network:%s", id) try: switchlib.update_lswitch(self.cluster, nsx_switch_ids[0], network['network']['name']) except api_exc.NsxApiException as e: LOG.warning("Logical switch update on NSX backend failed. " "Neutron network id:%(net_id)s; " "NSX lswitch id:%(lswitch_id)s;" "Error:%(error)s", {'net_id': id, 'lswitch_id': nsx_switch_ids[0], 'error': e}) return net def create_port(self, context, port): # If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED # then we pass the port to the policy engine. The reason why we don't # pass the value to the policy engine when the port is # ATTR_NOT_SPECIFIED is for the case where a port is created on a # shared network that is not owned by the tenant. 
        port_data = port['port']
        dhcp_opts = port_data.get(edo_ext.EXTRADHCPOPTS, [])
        # Set port status as 'DOWN'. This will be updated by backend sync.
        port_data['status'] = constants.PORT_STATUS_DOWN
        with db_api.context_manager.writer.using(context):
            # First we allocate port in neutron database
            neutron_db = super(NsxPluginV2, self).create_port(context, port)
            neutron_port_id = neutron_db['id']
            # Update fields obtained from neutron db (eg: MAC address)
            port["port"].update(neutron_db)
            self.handle_port_metadata_access(context, neutron_db)
            # port security extension checks
            (port_security, has_ip) = self._determine_port_security_and_has_ip(
                context, port_data)
            port_data[psec.PORTSECURITY] = port_security
            self._process_port_port_security_create(
                context, port_data, neutron_db)
            # allowed address pair checks
            if validators.is_attr_set(port_data.get(
                    addr_apidef.ADDRESS_PAIRS)):
                if not port_security:
                    raise addr_exc.AddressPairAndPortSecurityRequired()
                else:
                    self._process_create_allowed_address_pairs(
                        context, neutron_db,
                        port_data[addr_apidef.ADDRESS_PAIRS])
            else:
                # remove ATTR_NOT_SPECIFIED
                port_data[addr_apidef.ADDRESS_PAIRS] = []
            # security group extension checks
            # NOTE: check_update_has_security_groups works fine for
            # create operations as well
            if port_security and has_ip:
                self._ensure_default_security_group_on_port(context, port)
            elif self._check_update_has_security_groups(
                {'port': port_data}):
                raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups()
            port_data[ext_sg.SECURITYGROUPS] = (
                self._get_security_groups_on_port(context, port))
            self._process_port_create_security_group(
                context, port_data, port_data[ext_sg.SECURITYGROUPS])
            # QoS extension checks
            port_queue_id = self._check_for_queue_and_create(
                context, port_data)
            self._process_port_queue_mapping(
                context, port_data, port_queue_id)
            if (isinstance(port_data.get(mac_ext.MAC_LEARNING), bool)):
                self._create_mac_learning_state(context, port_data)
            elif mac_ext.MAC_LEARNING in port_data:
                # Drop the unset (ATTR_NOT_SPECIFIED) attribute
                port_data.pop(mac_ext.MAC_LEARNING)
            self._process_portbindings_create_and_update(context,
                                                         port['port'],
                                                         port_data)
            self._process_port_create_extra_dhcp_opts(context, port_data,
                                                      dhcp_opts)
            # For some reason the port bindings DB mixin does not handle
            # the VNIC_TYPE attribute, which is required by nova for
            # setting up VIFs.
            context.session.flush()
            port_data[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL
        # DB Operation is complete, perform NSX operation
        try:
            port_data = port['port'].copy()
            # Dispatch to the device_owner-specific backend driver
            port_create_func = self._port_drivers['create'].get(
                port_data['device_owner'],
                self._port_drivers['create']['default'])
            port_create_func(context, port_data)
            LOG.debug("port created on NSX backend for tenant "
                      "%(tenant_id)s: (%(id)s)", port_data)
        except n_exc.NotFound:
            LOG.warning("Logical switch for network %s was not "
                        "found in NSX.", port_data['network_id'])
            # Put port in error on neutron DB
            with db_api.context_manager.writer.using(context):
                port = self._get_port(context, neutron_port_id)
                port_data['status'] = constants.PORT_STATUS_ERROR
                port['status'] = port_data['status']
                context.session.add(port)
        except Exception:
            # Port must be removed from neutron DB
            with excutils.save_and_reraise_exception():
                LOG.error("Unable to create port or set port "
                          "attachment in NSX.")
                with db_api.context_manager.writer.using(context):
                    self.ipam.delete_port(context, neutron_port_id)
        # this extra lookup is necessary to get the
        # latest db model for the extension functions
        port_model = self._get_port(context, neutron_port_id)
        resource_extend.apply_funcs('ports', port_data, port_model)
        self.handle_port_dhcp_access(context, port_data, action='create_port')
        return port_data

    def update_port(self, context, id, port):
        """Update a Neutron port and its NSX logical port."""
        delete_security_groups = self._check_update_deletes_security_groups(
            port)
        has_security_groups = self._check_update_has_security_groups(port)
        delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
            port)
        has_addr_pairs = self._check_update_has_allowed_address_pairs(port)

        with db_api.context_manager.writer.using(context):
            ret_port = super(NsxPluginV2, self).update_port(
                context, id, port)

            # Save current mac learning state to check whether it's
            # being updated or not
            old_mac_learning_state = ret_port.get(mac_ext.MAC_LEARNING)

            # copy values over - except fixed_ips as
            # they've already been processed
            port['port'].pop('fixed_ips', None)
            ret_port.update(port['port'])
            tenant_id = ret_port['tenant_id']

            self._update_extra_dhcp_opts_on_port(context, id, port,
                                                 ret_port)
            # populate port_security setting
            if psec.PORTSECURITY not in port['port']:
                ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
                    context, id)
            has_ip = self._ip_on_port(ret_port)
            # validate port security and allowed address pairs
            if not ret_port[psec.PORTSECURITY]:
                # has address pairs in request
                if has_addr_pairs:
                    raise addr_exc.AddressPairAndPortSecurityRequired()
                elif not delete_addr_pairs:
                    # check if address pairs are in db
                    ret_port[addr_apidef.ADDRESS_PAIRS] = (
                        self.get_allowed_address_pairs(context, id))
                    if ret_port[addr_apidef.ADDRESS_PAIRS]:
                        raise addr_exc.AddressPairAndPortSecurityRequired()

            if (delete_addr_pairs or has_addr_pairs):
                # delete address pairs and read them in
                self._delete_allowed_address_pairs(context, id)
                self._process_create_allowed_address_pairs(
                    context, ret_port,
                    ret_port[addr_apidef.ADDRESS_PAIRS])

            # checks if security groups were updated adding/modifying
            # security groups, port security is set and port has ip
            if not (has_ip and ret_port[psec.PORTSECURITY]):
                if has_security_groups:
                    raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups()
                # Update did not have security groups passed in. Check
                # that port does not have any security groups already on it.
                filters = {'port_id': [id]}
                security_groups = (
                    super(NsxPluginV2,
                          self)._get_port_security_group_bindings(
                              context, filters)
                )
                if security_groups and not delete_security_groups:
                    raise psec_exc.PortSecurityPortHasSecurityGroup()

            if (delete_security_groups or has_security_groups):
                # delete the port binding and read it with the new rules.
                self._delete_port_security_group_bindings(context, id)
                sgids = self._get_security_groups_on_port(context, port)
                self._process_port_create_security_group(context, ret_port,
                                                         sgids)

            if psec.PORTSECURITY in port['port']:
                self._process_port_port_security_update(
                    context, port['port'], ret_port)

            port_queue_id = self._check_for_queue_and_create(
                context, ret_port)
            # Populate the mac learning attribute
            new_mac_learning_state = port['port'].get(mac_ext.MAC_LEARNING)
            if (new_mac_learning_state is not None and
                old_mac_learning_state != new_mac_learning_state):
                self._update_mac_learning_state(context, id,
                                                new_mac_learning_state)
                ret_port[mac_ext.MAC_LEARNING] = new_mac_learning_state
            self._delete_port_queue_mapping(context, ret_port['id'])
            self._process_port_queue_mapping(context, ret_port,
                                             port_queue_id)
            self._process_portbindings_create_and_update(context,
                                                         port['port'],
                                                         ret_port)
            nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
                context.session, self.cluster, id)
            # Convert Neutron security groups identifiers into NSX security
            # profiles identifiers
            nsx_sec_profile_ids = [
                nsx_utils.get_nsx_security_group_id(
                    context.session, self.cluster, neutron_sg_id)
                for neutron_sg_id in (ret_port[ext_sg.SECURITYGROUPS] or [])]

        # Perform the NSX operation outside of the DB transaction
        LOG.debug("Updating port %s on NSX backend", ret_port['id'])
        if nsx_port_id:
            try:
                switchlib.update_port(
                    self.cluster, nsx_switch_id, nsx_port_id,
                    id, tenant_id,
                    ret_port['name'],
                    ret_port['device_id'],
                    ret_port['admin_state_up'],
                    ret_port['mac_address'],
                    ret_port['fixed_ips'],
                    ret_port[psec.PORTSECURITY],
                    nsx_sec_profile_ids,
                    ret_port[qos.QUEUE],
                    ret_port.get(mac_ext.MAC_LEARNING),
                    ret_port.get(addr_apidef.ADDRESS_PAIRS))

                # Update the port status from nsx. If we fail here hide it
                # since the port was successfully updated but we were not
                # able to retrieve the status.
ret_port['status'] = switchlib.get_port_status( self.cluster, nsx_switch_id, nsx_port_id) # FIXME(arosen) improve exception handling. except Exception: ret_port['status'] = constants.PORT_STATUS_ERROR LOG.exception("Unable to update port id: %s.", nsx_port_id) # If nsx_port_id is not in database or in nsx put in error state. else: ret_port['status'] = constants.PORT_STATUS_ERROR return ret_port def delete_port(self, context, id, l3_port_check=True, nw_gw_port_check=True): """Deletes a port on a specified Virtual Network. If the port contains a remote interface attachment, the remote interface is first un-plugged and then the port is deleted. :returns: None :raises: exception.PortInUse :raises: exception.PortNotFound :raises: exception.NetworkNotFound """ # if needed, check to see if this is a port owned by # a l3 router. If so, we should prevent deletion here if l3_port_check: self.prevent_l3_port_deletion(context, id) neutron_db_port = self.get_port(context, id) # Perform the same check for ports owned by layer-2 gateways if nw_gw_port_check: self.prevent_network_gateway_port_deletion(context, neutron_db_port) port_delete_func = self._port_drivers['delete'].get( neutron_db_port['device_owner'], self._port_drivers['delete']['default']) port_delete_func(context, neutron_db_port) self.disassociate_floatingips(context, id) with db_api.context_manager.writer.using(context): queue = self._get_port_queue_bindings(context, {'port_id': [id]}) # metadata_dhcp_host_route self.handle_port_metadata_access( context, neutron_db_port, is_delete=True) super(NsxPluginV2, self).delete_port(context, id) # Delete qos queue if possible if queue: self.delete_qos_queue(context, queue[0]['queue_id'], False) self.handle_port_dhcp_access( context, neutron_db_port, action='delete_port') def get_port(self, context, id, fields=None): with db_api.context_manager.writer.using(context): if (self.nsx_sync_opts.always_read_status or fields and 'status' in fields): # Perform explicit state 
                # synchronization with the backend before returning
                db_port = self._get_port(context, id)
                self._synchronizer.synchronize_port(
                    context, db_port)
                return self._make_port_dict(db_port, fields)
            else:
                return super(NsxPluginV2, self).get_port(context,
                                                         id, fields)

    def get_router(self, context, id, fields=None):
        """Return a router, optionally refreshing its status from NSX.

        Status is synchronized from the backend only when the caller asked
        for the 'status' field (or always_read_status is configured).
        """
        with db_api.context_manager.writer.using(context):
            if (self.nsx_sync_opts.always_read_status or
                    fields and 'status' in fields):
                db_router = self._get_router(context, id)
                # Perform explicit state synchronization
                self._synchronizer.synchronize_router(
                    context, db_router)
                return self._make_router_dict(db_router, fields)
            else:
                return super(NsxPluginV2, self).get_router(context,
                                                           id, fields)

    def _create_lrouter(self, context, router, nexthop):
        """Create the logical router on the NSX backend.

        Raises BadRequest when 'distributed' is not supported by the NSX
        version in use, and NsxPluginException on other backend failures.
        """
        tenant_id = router['tenant_id']
        distributed = router.get('distributed')
        try:
            lrouter = routerlib.create_lrouter(
                self.cluster, router['id'],
                tenant_id, router['name'], nexthop,
                distributed=(validators.is_attr_set(distributed) and
                             distributed))
        except nsx_exc.InvalidVersion:
            msg = _("Cannot create a distributed router with the NSX "
                    "platform currently in execution. Please, try "
                    "without specifying the 'distributed' attribute.")
            LOG.exception(msg)
            raise n_exc.BadRequest(resource='router', msg=msg)
        except api_exc.NsxApiException:
            err_msg = _("Unable to create logical router on NSX Platform")
            LOG.exception(err_msg)
            raise nsx_exc.NsxPluginException(err_msg=err_msg)
        # Create the port here - and update it later if we have gw_info
        try:
            self._create_and_attach_router_port(
                self.cluster, context, lrouter['uuid'], {'fake_ext_gw': True},
                "L3GatewayAttachment",
                self.cluster.default_l3_gw_service_uuid)
        except nsx_exc.NsxPluginException:
            LOG.exception("Unable to create L3GW port on logical router "
                          "%(router_uuid)s.
Verify Default Layer-3 " "Gateway service %(def_l3_gw_svc)s id is " "correct", {'router_uuid': lrouter['uuid'], 'def_l3_gw_svc': self.cluster.default_l3_gw_service_uuid}) # Try and remove logical router from NSX routerlib.delete_lrouter(self.cluster, lrouter['uuid']) # Return user a 500 with an apter message raise nsx_exc.NsxPluginException( err_msg=(_("Unable to create router %s on NSX backend") % router['id'])) lrouter['status'] = constants.ACTIVE return lrouter def _process_extra_attr_router_create(self, context, router_db, r): for extra_attr in l3_attrs_db.get_attr_info().keys(): if extra_attr in r: self.set_extra_attr_value(context, router_db, extra_attr, r[extra_attr]) def create_router(self, context, router): # NOTE(salvatore-orlando): We completely override this method in # order to be able to use the NSX ID as Neutron ID # TODO(salvatore-orlando): Propose upstream patch for allowing # 3rd parties to specify IDs as we do with l2 plugin r = router['router'] has_gw_info = False tenant_id = r['tenant_id'] # default value to set - nsx wants it (even if we don't have it) nexthop = NSX_DEFAULT_NEXTHOP # if external gateway info are set, then configure nexthop to # default external gateway if 'external_gateway_info' in r and r.get('external_gateway_info'): has_gw_info = True gw_info = r['external_gateway_info'] del r['external_gateway_info'] # The following DB read will be performed again when updating # gateway info. This is not great, but still better than # creating NSX router here and updating it later network_id = (gw_info.get('network_id', None) if gw_info else None) if network_id: ext_net = self._get_network(context, network_id) if not ext_net.external: msg = (_("Network '%s' is not a valid external " "network") % network_id) raise n_exc.BadRequest(resource='router', msg=msg) if ext_net.subnets: ext_subnet = ext_net.subnets[0] nexthop = ext_subnet.gateway_ip # NOTE(salv-orlando): Pre-generating uuid for Neutron # router. 
This will be removed once the router create operation # becomes an asynchronous task neutron_router_id = str(uuid.uuid4()) r['id'] = neutron_router_id # Populate distributed attribute in order to ensure the appropriate # type of router is created in the NSX backend r['distributed'] = l3_dvr_db.is_distributed_router(r) lrouter = self._create_lrouter(context, r, nexthop) # TODO(salv-orlando): Deal with backend object removal in case # of db failures with db_api.context_manager.writer.using(context): # Transaction nesting is needed to avoid foreign key violations # when processing the distributed router binding with db_api.context_manager.writer.using(context): router_db = l3_db_models.Router( id=neutron_router_id, tenant_id=tenant_id, name=r['name'], admin_state_up=r['admin_state_up'], status=lrouter['status']) context.session.add(router_db) self._process_extra_attr_router_create(context, router_db, r) # Ensure neutron router is moved into the transaction's buffer context.session.flush() # Add mapping between neutron and nsx identifiers nsx_db.add_neutron_nsx_router_mapping( context.session, router_db['id'], lrouter['uuid']) if has_gw_info: # NOTE(salv-orlando): This operation has been moved out of the # database transaction since it performs several NSX queries, # ithis ncreasing the risk of deadlocks between eventlet and # sqlalchemy operations. # Set external gateway and remove router in case of failure try: self._update_router_gw_info(context, router_db['id'], gw_info) except (n_exc.NeutronException, api_exc.NsxApiException): with excutils.save_and_reraise_exception(): # As setting gateway failed, the router must be deleted # in order to ensure atomicity router_id = router_db['id'] LOG.warning("Failed to set gateway info for router " "being created:%s - removing router", router_id) self.delete_router(context, router_id) LOG.info("Create router failed while setting external " "gateway. 
Router:%s has been removed from " "DB and backend", router_id) return self._make_router_dict(router_db) def _update_lrouter(self, context, router_id, name, nexthop, routes=None): nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) return routerlib.update_lrouter( self.cluster, nsx_router_id, name, nexthop, routes=routes) def _update_lrouter_routes(self, context, router_id, routes): nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) routerlib.update_explicit_routes_lrouter( self.cluster, nsx_router_id, routes) def update_router(self, context, router_id, router): # Either nexthop is updated or should be kept as it was before r = router['router'] nexthop = None if 'external_gateway_info' in r and r.get('external_gateway_info'): gw_info = r['external_gateway_info'] # The following DB read will be performed again when updating # gateway info. This is not great, but still better than # creating NSX router here and updating it later network_id = (gw_info.get('network_id', None) if gw_info else None) if network_id: ext_net = self._get_network(context, network_id) if not ext_net.external: msg = (_("Network '%s' is not a valid external " "network") % network_id) raise n_exc.BadRequest(resource='router', msg=msg) if ext_net.subnets: ext_subnet = ext_net.subnets[0] nexthop = ext_subnet.gateway_ip try: for route in r.get('routes', []): if route['destination'] == '0.0.0.0/0': msg = _("'routes' cannot contain route '0.0.0.0/0', " "this must be updated through the default " "gateway attribute") raise n_exc.BadRequest(resource='router', msg=msg) previous_routes = self._update_lrouter( context, router_id, r.get('name'), nexthop, routes=r.get('routes')) # NOTE(salv-orlando): The exception handling below is not correct, but # unfortunately nsxlib raises a neutron notfound exception when an # object is not found in the underlying backend except n_exc.NotFound: # Put the router in ERROR status with 
db_api.context_manager.writer.using(context): router_db = self._get_router(context, router_id) router_db['status'] = constants.NET_STATUS_ERROR raise nsx_exc.NsxPluginException( err_msg=_("Logical router %s not found " "on NSX Platform") % router_id) except api_exc.NsxApiException: raise nsx_exc.NsxPluginException( err_msg=_("Unable to update logical router on NSX Platform")) except nsx_exc.InvalidVersion: msg = _("Request cannot contain 'routes' with the NSX " "platform currently in execution. Please, try " "without specifying the static routes.") LOG.exception(msg) raise n_exc.BadRequest(resource='router', msg=msg) try: return super(NsxPluginV2, self).update_router(context, router_id, router) except (xroute_exc.InvalidRoutes, xroute_exc.RouterInterfaceInUseByRoute, xroute_exc.RoutesExhausted): with excutils.save_and_reraise_exception(): # revert changes made to NSX self._update_lrouter_routes( context, router_id, previous_routes) def _delete_lrouter(self, context, router_id, nsx_router_id): # The neutron router id (router_id) is ignored in this routine, # but used in plugins deriving from this one routerlib.delete_lrouter(self.cluster, nsx_router_id) def delete_router(self, context, router_id): with db_api.context_manager.writer.using(context): # NOTE(salv-orlando): These checks will be repeated anyway when # calling the superclass. This is wasteful, but is the simplest # way of ensuring a consistent removal of the router both in # the neutron Database and in the NSX backend. 
self._ensure_router_not_in_use(context, router_id) # TODO(salv-orlando): This call should have no effect on delete # router, but if it does, it should not happen within a # transaction, and it should be restored on rollback self.handle_router_metadata_access( context, router_id, interface=None) nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) # It is safe to remove the router from the database, so remove it # from the backend if nsx_router_id: try: self._delete_lrouter(context, router_id, nsx_router_id) except n_exc.NotFound: # This is not a fatal error, but needs to be logged LOG.warning("Logical router '%s' not found " "on NSX Platform", router_id) except api_exc.NsxApiException: raise nsx_exc.NsxPluginException( err_msg=(_("Unable to delete logical router '%s' " "on NSX Platform") % nsx_router_id)) else: # If no mapping is found it is likely that the logical router does # not exist anymore in the backend. This is not a fatal condition, # but will result in an exception is "None" is passed to # _delete_lrouter LOG.warning("No mapping found for logical router '%s' " "on NSX Platform", router_id) # Remove the NSX mapping first in order to ensure a mapping to # a non-existent NSX router is not left in the DB in case of # failure while removing the router from the neutron DB try: nsx_db.delete_neutron_nsx_router_mapping( context.session, router_id) except db_exc.DBError as d_exc: # Do not make this error fatal LOG.warning("Unable to remove NSX mapping for Neutron router " "%(router_id)s because of the following exception:" "%(d_exc)s", {'router_id': router_id, 'd_exc': str(d_exc)}) # Perform the actual delete on the Neutron DB super(NsxPluginV2, self).delete_router(context, router_id) def _add_subnet_snat_rule(self, context, router, subnet): gw_port = router.gw_port if gw_port and router.enable_snat: # There is a change gw_port might have multiple IPs # In that case we will consider only the first one if gw_port.get('fixed_ips'): 
                # There is a chance gw_port has multiple IPs; only the
                # first one is used as the SNAT translated address.
                snat_ip = gw_port['fixed_ips'][0]['ip_address']
                cidr_prefix = int(subnet['cidr'].split('/')[1])
                nsx_router_id = nsx_utils.get_nsx_router_id(
                    context.session, self.cluster, router['id'])
                # Rule order depends on prefix length so more specific
                # subnets get higher-priority SNAT rules.
                routerlib.create_lrouter_snat_rule(
                    self.cluster, nsx_router_id, snat_ip, snat_ip,
                    order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix,
                    match_criteria={'source_ip_addresses': subnet['cidr']})

    def _delete_subnet_snat_rule(self, context, router, subnet):
        """Delete the backend SNAT rule matching the subnet's CIDR.

        No-op when the router has no external gateway port.
        """
        # Remove SNAT rule if external gateway is configured
        if router.gw_port:
            nsx_router_id = nsx_utils.get_nsx_router_id(
                context.session, self.cluster, router['id'])
            routerlib.delete_nat_rules_by_match(
                self.cluster, nsx_router_id, "SourceNatRule",
                max_num_expected=1, min_num_expected=1,
                raise_on_len_mismatch=False,
                source_ip_addresses=subnet['cidr'])

    def add_router_interface(self, context, router_id, interface_info):
        # When adding interface by port_id we need to create the
        # peer port on the nsx logical router in this routine
        port_id = interface_info.get('port_id')
        router_iface_info = super(NsxPluginV2, self).add_router_interface(
            context, router_id, interface_info)
        # router_iface_info will always have a subnet_id attribute
        subnet_id = router_iface_info['subnet_id']
        nsx_router_id = nsx_utils.get_nsx_router_id(
            context.session, self.cluster, router_id)
        if port_id:
            port_data = self.get_port(context, port_id)
            # If security groups are present we need to remove them as
            # this is a router port and disable port security.
if port_data['security_groups']: self.update_port(context, port_id, {'port': {'security_groups': [], psec.PORTSECURITY: False}}) nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( context.session, self.cluster, port_id) # Unplug current attachment from lswitch port switchlib.plug_vif_interface(self.cluster, nsx_switch_id, nsx_port_id, "NoAttachment") # Create logical router port and plug patch attachment self._create_and_attach_router_port( self.cluster, context, nsx_router_id, port_data, "PatchAttachment", nsx_port_id, subnet_ids=[subnet_id]) subnet = self._get_subnet(context, subnet_id) # If there is an external gateway we need to configure the SNAT rule. # Fetch router from DB router = self._get_router(context, router_id) self._add_subnet_snat_rule(context, router, subnet) routerlib.create_lrouter_nosnat_rule( self.cluster, nsx_router_id, order=NSX_NOSNAT_RULES_ORDER, match_criteria={'destination_ip_addresses': subnet['cidr']}) # Ensure the NSX logical router has a connection to a 'metadata access' # network (with a proxy listening on its DHCP port), by creating it # if needed. self.handle_router_metadata_access( context, router_id, interface=router_iface_info) LOG.debug("Add_router_interface completed for subnet:%(subnet_id)s " "and router:%(router_id)s", {'subnet_id': subnet_id, 'router_id': router_id}) return router_iface_info def get_l3_agents_hosting_routers(self, context, routers): # This method is just a stub added because is required by the l3 dvr # mixin. 
That's so much for a management layer which is plugin # agnostic return [] def _create_snat_intf_ports_if_not_exists(self, context, router): # VMware plugins do not need SNAT interface ports return [] def _add_csnat_router_interface_port(self, context, router, network_id, subnet_id, do_pop=True): # VMware plugins do not need SNAT interface ports return def _delete_csnat_router_interface_ports(self, context, router, subnet_id=None): # VMware plugins do not need SNAT interface ports return def remove_router_interface(self, context, router_id, interface_info): # The code below is duplicated from base class, but comes handy # as we need to retrieve the router port id before removing the port subnet = None subnet_id = None if 'port_id' in interface_info: port_id = interface_info['port_id'] # find subnet_id - it is need for removing the SNAT rule port = self._get_port(context, port_id) if port.get('fixed_ips'): subnet_id = port['fixed_ips'][0]['subnet_id'] if not (port['device_owner'] in constants.ROUTER_INTERFACE_OWNERS and port['device_id'] == router_id): raise l3_exc.RouterInterfaceNotFound( router_id=router_id, port_id=port_id) elif 'subnet_id' in interface_info: subnet_id = interface_info['subnet_id'] subnet = self._get_subnet(context, subnet_id) rport_qry = context.session.query(models_v2.Port) ports = rport_qry.filter_by( device_id=router_id, network_id=subnet['network_id']).filter( models_v2.Port.device_owner.in_( constants.ROUTER_INTERFACE_OWNERS)) for p in ports: if p['fixed_ips'][0]['subnet_id'] == subnet_id: port_id = p['id'] break else: raise l3_exc.RouterInterfaceNotFoundForSubnet( router_id=router_id, subnet_id=subnet_id) # Finally remove the data from the Neutron DB # This will also destroy the port on the logical switch info = super(NsxPluginV2, self).remove_router_interface( context, router_id, interface_info) try: # Ensure the connection to the 'metadata access network' # is removed (with the network) if this the last subnet # on the router 
self.handle_router_metadata_access( context, router_id, interface=None) if not subnet: subnet = self._get_subnet(context, subnet_id) router = self._get_router(context, router_id) # If router is enabled_snat = False there are no snat rules to # delete. if router.enable_snat: self._delete_subnet_snat_rule(context, router, subnet) # Relax the minimum expected number as the nosnat rules # do not exist in 2.x deployments nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) routerlib.delete_nat_rules_by_match( self.cluster, nsx_router_id, "NoSourceNatRule", max_num_expected=1, min_num_expected=0, raise_on_len_mismatch=False, destination_ip_addresses=subnet['cidr']) except n_exc.NotFound: LOG.error("Logical router resource %s not found " "on NSX platform", router_id) except api_exc.NsxApiException: raise nsx_exc.NsxPluginException( err_msg=(_("Unable to update logical router" "on NSX Platform"))) return info def _retrieve_and_delete_nat_rules(self, context, floating_ip_address, internal_ip, nsx_router_id, min_num_rules_expected=0): """Finds and removes NAT rules from a NSX router.""" # NOTE(salv-orlando): The context parameter is ignored in this method # but used by derived classes try: # Remove DNAT rule for the floating IP routerlib.delete_nat_rules_by_match( self.cluster, nsx_router_id, "DestinationNatRule", max_num_expected=1, min_num_expected=min_num_rules_expected, destination_ip_addresses=floating_ip_address) # Remove SNAT rules for the floating IP routerlib.delete_nat_rules_by_match( self.cluster, nsx_router_id, "SourceNatRule", max_num_expected=1, min_num_expected=min_num_rules_expected, source_ip_addresses=internal_ip) routerlib.delete_nat_rules_by_match( self.cluster, nsx_router_id, "SourceNatRule", max_num_expected=1, min_num_expected=min_num_rules_expected, destination_ip_addresses=internal_ip) except api_exc.NsxApiException: with excutils.save_and_reraise_exception(): LOG.exception("An error occurred while removing NAT rules 
                              "on the NSX platform for floating ip:%s",
                              floating_ip_address)
        except nsx_exc.NatRuleMismatch:
            # Do not surface to the user
            LOG.warning("An incorrect number of matching NAT rules "
                        "was found on the NSX platform")

    def _remove_floatingip_address(self, context, fip_db):
        """Strip the floating IP address from the NSX gateway port.

        Rebuilds the IP list from the floating port's fixed_ips and removes
        those addresses from the router's external gateway port on NSX.
        """
        # Remove floating IP address from logical router port
        # Fetch logical port of router's external gateway
        router_id = fip_db.router_id
        nsx_router_id = nsx_utils.get_nsx_router_id(
            context.session, self.cluster, router_id)
        nsx_gw_port_id = routerlib.find_router_gw_port(
            context, self.cluster, nsx_router_id)['uuid']
        # Elevated context: the floating port may belong to another tenant
        ext_neutron_port_db = self._get_port(context.elevated(),
                                             fip_db.floating_port_id)
        nsx_floating_ips = self._build_ip_address_list(
            context.elevated(), ext_neutron_port_db['fixed_ips'])
        routerlib.update_lrouter_port_ips(self.cluster,
                                          nsx_router_id,
                                          nsx_gw_port_id,
                                          ips_to_add=[],
                                          ips_to_remove=nsx_floating_ips)

    def _floatingip_status(self, floatingip_db, associated):
        """Return the status the floating IP should transition to.

        Returns ACTIVE/DOWN only when the association state actually
        changed; otherwise returns the current status unchanged.
        """
        if (associated and
            floatingip_db['status'] != constants.FLOATINGIP_STATUS_ACTIVE):
            return constants.FLOATINGIP_STATUS_ACTIVE
        elif (not associated and
              floatingip_db['status'] != constants.FLOATINGIP_STATUS_DOWN):
            return constants.FLOATINGIP_STATUS_DOWN
        # in any case ensure the status is not reset by this method!
        return floatingip_db['status']

    def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
        """Update floating IP association data.

        Overrides method from base class.
        The method is augmented for creating NAT rules in the process.
""" # Store router currently serving the floating IP old_router_id = floatingip_db.router_id port_id, internal_ip, router_id = self._check_and_get_fip_assoc( context, fip, floatingip_db) floating_ip = floatingip_db['floating_ip_address'] # If there's no association router_id will be None if router_id: nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) self._retrieve_and_delete_nat_rules( context, floating_ip, internal_ip, nsx_router_id) nsx_floating_ips = self._build_ip_address_list( context.elevated(), external_port['fixed_ips']) floating_ip = floatingip_db['floating_ip_address'] # Retrieve and delete existing NAT rules, if any if old_router_id: nsx_old_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, old_router_id) # Retrieve the current internal ip _p, _s, old_internal_ip = self._internal_fip_assoc_data( context, {'id': floatingip_db.id, 'port_id': floatingip_db.fixed_port_id, 'fixed_ip_address': str(floatingip_db.fixed_ip_address), 'tenant_id': floatingip_db.tenant_id}, floatingip_db.tenant_id) nsx_gw_port_id = routerlib.find_router_gw_port( context, self.cluster, nsx_old_router_id)['uuid'] self._retrieve_and_delete_nat_rules( context, floating_ip, old_internal_ip, nsx_old_router_id) routerlib.update_lrouter_port_ips( self.cluster, nsx_old_router_id, nsx_gw_port_id, ips_to_add=[], ips_to_remove=nsx_floating_ips) if router_id: nsx_gw_port_id = routerlib.find_router_gw_port( context, self.cluster, nsx_router_id)['uuid'] # Re-create NAT rules only if a port id is specified if fip.get('port_id'): try: # Setup DNAT rules for the floating IP routerlib.create_lrouter_dnat_rule( self.cluster, nsx_router_id, internal_ip, order=NSX_FLOATINGIP_NAT_RULES_ORDER, match_criteria={'destination_ip_addresses': floating_ip}) # Setup SNAT rules for the floating IP # Create a SNAT rule for enabling connectivity to the # floating IP from the same network as the internal port # Find subnet id for internal_ip from fixed_ips 
internal_port = self._get_port(context, port_id) # Cchecks not needed on statements below since otherwise # _internal_fip_assoc_data would have raised subnet_ids = [ip['subnet_id'] for ip in internal_port['fixed_ips'] if ip['ip_address'] == internal_ip] internal_subnet_cidr = self._build_ip_address_list( context, internal_port['fixed_ips'], subnet_ids=subnet_ids)[0] routerlib.create_lrouter_snat_rule( self.cluster, nsx_router_id, floating_ip, floating_ip, order=NSX_NOSNAT_RULES_ORDER - 1, match_criteria={'source_ip_addresses': internal_subnet_cidr, 'destination_ip_addresses': internal_ip}) # setup snat rule such that src ip of an IP packet when # using floating is the floating ip itself. routerlib.create_lrouter_snat_rule( self.cluster, nsx_router_id, floating_ip, floating_ip, order=NSX_FLOATINGIP_NAT_RULES_ORDER, match_criteria={'source_ip_addresses': internal_ip}) # Add Floating IP address to router_port routerlib.update_lrouter_port_ips( self.cluster, nsx_router_id, nsx_gw_port_id, ips_to_add=nsx_floating_ips, ips_to_remove=[]) except api_exc.NsxApiException: LOG.exception("An error occurred while creating NAT " "rules on the NSX platform for floating " "ip:%(floating_ip)s mapped to " "internal ip:%(internal_ip)s", {'floating_ip': floating_ip, 'internal_ip': internal_ip}) msg = _("Failed to update NAT rules for floatingip update") raise nsx_exc.NsxPluginException(err_msg=msg) # Update also floating ip status (no need to call base class method) new_status = self._floatingip_status(floatingip_db, router_id) floatingip_db.fixed_ip_address = internal_ip floatingip_db.fixed_port_id = port_id floatingip_db.router_id = router_id floatingip_db.status = new_status return {'fixed_ip_address': internal_ip, 'fixed_port_id': port_id, 'router_id': router_id, 'last_known_router_id': None, 'floating_ip_address': floatingip_db.floating_ip_address, 'floating_network_id': floatingip_db.floating_network_id, 'floating_ip_id': floatingip_db.id, 'context': context} 
@lockutils.synchronized('vmware', 'neutron-') def create_floatingip(self, context, floatingip): return super(NsxPluginV2, self).create_floatingip(context, floatingip) @lockutils.synchronized('vmware', 'neutron-') def update_floatingip(self, context, floatingip_id, floatingip): return super(NsxPluginV2, self).update_floatingip(context, floatingip_id, floatingip) @lockutils.synchronized('vmware', 'neutron-') def delete_floatingip(self, context, id): fip_db = self._get_floatingip(context, id) # Check whether the floating ip is associated or not if fip_db.fixed_port_id: nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, fip_db.router_id) self._retrieve_and_delete_nat_rules(context, fip_db.floating_ip_address, fip_db.fixed_ip_address, nsx_router_id, min_num_rules_expected=1) # Remove floating IP address from logical router port self._remove_floatingip_address(context, fip_db) return super(NsxPluginV2, self).delete_floatingip(context, id) def disassociate_floatingips(self, context, port_id): try: fip_qry = context.session.query(l3_db_models.FloatingIP) fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) for fip_db in fip_dbs: nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, fip_db.router_id) self._retrieve_and_delete_nat_rules(context, fip_db.floating_ip_address, fip_db.fixed_ip_address, nsx_router_id, min_num_rules_expected=1) self._remove_floatingip_address(context, fip_db) except sa_exc.NoResultFound: LOG.debug("The port '%s' is not associated with floating IPs", port_id) except n_exc.NotFound: LOG.warning("Nat rules not found in nsx for port: %s", id) # NOTE(ihrachys): L3 agent notifications don't make sense for # NSX VMWare plugin since there is no L3 agent in such setup, so # disabling them here. super(NsxPluginV2, self).disassociate_floatingips( context, port_id, do_notify=False) def create_network_gateway(self, context, network_gateway): """Create a layer-2 network gateway. 
Create the gateway service on NSX platform and corresponding data structures in Neutron datase. """ gw_data = network_gateway[networkgw.GATEWAY_RESOURCE_NAME] tenant_id = gw_data['tenant_id'] # Ensure the default gateway in the config file is in sync with the db self._ensure_default_network_gateway() # Validate provided gateway device list self._validate_device_list(context, tenant_id, gw_data) devices = gw_data['devices'] # Populate default physical network where not specified for device in devices: if not device.get('interface_name'): device['interface_name'] = (self.cluster. nsx_default_interface_name) try: # Replace Neutron device identifiers with NSX identifiers dev_map = dict((dev['id'], dev['interface_name']) for dev in devices) nsx_devices = [] for db_device in self._query_gateway_devices( context, filters={'id': [device['id'] for device in devices]}): nsx_devices.append( {'id': db_device['nsx_id'], 'interface_name': dev_map[db_device['id']]}) nsx_res = l2gwlib.create_l2_gw_service( self.cluster, tenant_id, gw_data['name'], nsx_devices) nsx_uuid = nsx_res.get('uuid') except api_exc.Conflict: raise nsx_exc.L2GatewayAlreadyInUse(gateway=gw_data['name']) except api_exc.NsxApiException: err_msg = _("Unable to create l2_gw_service for: %s") % gw_data LOG.exception(err_msg) raise nsx_exc.NsxPluginException(err_msg=err_msg) gw_data['id'] = nsx_uuid return super(NsxPluginV2, self).create_network_gateway( context, network_gateway, validate_device_list=False) def delete_network_gateway(self, context, gateway_id): """Remove a layer-2 network gateway. Remove the gateway service from NSX platform and corresponding data structures in Neutron datase. 
""" # Ensure the default gateway in the config file is in sync with the db self._ensure_default_network_gateway() with db_api.context_manager.writer.using(context): try: super(NsxPluginV2, self).delete_network_gateway( context, gateway_id) l2gwlib.delete_l2_gw_service(self.cluster, gateway_id) except api_exc.ResourceNotFound: # Do not cause a 500 to be returned to the user if # the corresponding NSX resource does not exist LOG.exception("Unable to remove gateway service from " "NSX plaform - the resource was not found") def get_network_gateway(self, context, id, fields=None): # Ensure the default gateway in the config file is in sync with the db self._ensure_default_network_gateway() return super(NsxPluginV2, self).get_network_gateway(context, id, fields) def get_network_gateways(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Ensure the default gateway in the config file is in sync with the db self._ensure_default_network_gateway() # Ensure the tenant_id attribute is populated on returned gateways return super(NsxPluginV2, self).get_network_gateways( context, filters, fields, sorts, limit, marker, page_reverse) def update_network_gateway(self, context, id, network_gateway): # Ensure the default gateway in the config file is in sync with the db self._ensure_default_network_gateway() # Update gateway on backend when there's a name change name = network_gateway[networkgw.GATEWAY_RESOURCE_NAME].get('name') if name: try: l2gwlib.update_l2_gw_service(self.cluster, id, name) except api_exc.NsxApiException: # Consider backend failures as non-fatal, but still warn # because this might indicate something dodgy is going on LOG.warning("Unable to update name on NSX backend " "for network gateway: %s", id) return super(NsxPluginV2, self).update_network_gateway( context, id, network_gateway) def connect_network(self, context, network_gateway_id, network_mapping_info): # Ensure the default gateway in the config file is in 
sync with the db self._ensure_default_network_gateway() try: return super(NsxPluginV2, self).connect_network( context, network_gateway_id, network_mapping_info) except api_exc.Conflict: raise nsx_exc.L2GatewayAlreadyInUse(gateway=network_gateway_id) def disconnect_network(self, context, network_gateway_id, network_mapping_info): # Ensure the default gateway in the config file is in sync with the db self._ensure_default_network_gateway() return super(NsxPluginV2, self).disconnect_network( context, network_gateway_id, network_mapping_info) def _get_nsx_device_id(self, context, device_id): return self._get_gateway_device(context, device_id)['nsx_id'] def _rollback_gw_device(self, context, device_id, gw_data=None, new_status=None, is_create=False): LOG.error("Rolling back database changes for gateway device %s " "because of an error in the NSX backend", device_id) with db_api.context_manager.writer.using(context): query = model_query.query_with_hooks( context, nsx_models.NetworkGatewayDevice).filter( nsx_models.NetworkGatewayDevice.id == device_id) if is_create: query.delete(synchronize_session=False) else: super(NsxPluginV2, self).update_gateway_device( context, device_id, {networkgw.DEVICE_RESOURCE_NAME: gw_data}) if new_status: query.update({'status': new_status}, synchronize_session=False) # TODO(salv-orlando): Handlers for Gateway device operations should be # moved into the appropriate nsx_handlers package once the code for the # blueprint nsx-async-backend-communication merges def create_gateway_device_handler(self, context, gateway_device, client_certificate): neutron_id = gateway_device['id'] try: nsx_res = l2gwlib.create_gateway_device( self.cluster, gateway_device['tenant_id'], gateway_device['name'], neutron_id, self.cluster.default_tz_uuid, gateway_device['connector_type'], gateway_device['connector_ip'], client_certificate) # Fetch status (it needs another NSX API call) device_status = nsx_utils.get_nsx_device_status(self.cluster, nsx_res['uuid']) # set 
NSX GW device in neutron database and update status with db_api.context_manager.writer.using(context): query = model_query.query_with_hooks( context, nsx_models.NetworkGatewayDevice).filter( nsx_models.NetworkGatewayDevice.id == neutron_id) query.update({'status': device_status, 'nsx_id': nsx_res['uuid']}, synchronize_session=False) LOG.debug("Neutron gateway device: %(neutron_id)s; " "NSX transport node identifier: %(nsx_id)s; " "Operational status: %(status)s.", {'neutron_id': neutron_id, 'nsx_id': nsx_res['uuid'], 'status': device_status}) return device_status except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException): with excutils.save_and_reraise_exception(): self._rollback_gw_device(context, neutron_id, is_create=True) def update_gateway_device_handler(self, context, gateway_device, old_gateway_device_data, client_certificate): nsx_id = gateway_device['nsx_id'] neutron_id = gateway_device['id'] try: l2gwlib.update_gateway_device( self.cluster, nsx_id, gateway_device['tenant_id'], gateway_device['name'], neutron_id, self.cluster.default_tz_uuid, gateway_device['connector_type'], gateway_device['connector_ip'], client_certificate) # Fetch status (it needs another NSX API call) device_status = nsx_utils.get_nsx_device_status(self.cluster, nsx_id) # update status with db_api.context_manager.writer.using(context): query = model_query.query_with_hooks( context, nsx_models.NetworkGatewayDevice).filter( nsx_models.NetworkGatewayDevice.id == neutron_id) query.update({'status': device_status}, synchronize_session=False) LOG.debug("Neutron gateway device: %(neutron_id)s; " "NSX transport node identifier: %(nsx_id)s; " "Operational status: %(status)s.", {'neutron_id': neutron_id, 'nsx_id': nsx_id, 'status': device_status}) return device_status except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException): with excutils.save_and_reraise_exception(): self._rollback_gw_device(context, neutron_id, gw_data=old_gateway_device_data) except n_exc.NotFound: # 
The gateway device was probably deleted in the backend. # The DB change should be rolled back and the status must # be put in error with excutils.save_and_reraise_exception(): self._rollback_gw_device(context, neutron_id, gw_data=old_gateway_device_data, new_status=networkgw_db.ERROR) def get_gateway_device(self, context, device_id, fields=None): # Get device from database gw_device = super(NsxPluginV2, self).get_gateway_device( context, device_id, fields, include_nsx_id=True) # Fetch status from NSX nsx_id = gw_device['nsx_id'] device_status = nsx_utils.get_nsx_device_status(self.cluster, nsx_id) # TODO(salv-orlando): Asynchronous sync for gateway device status # Update status in database with db_api.context_manager.writer.using(context): query = model_query.query_with_hooks( context, nsx_models.NetworkGatewayDevice).filter( nsx_models.NetworkGatewayDevice.id == device_id) query.update({'status': device_status}, synchronize_session=False) gw_device['status'] = device_status return gw_device def get_gateway_devices(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Get devices from database devices = super(NsxPluginV2, self).get_gateway_devices( context, filters, fields, include_nsx_id=True) # Fetch operational status from NSX, filter by tenant tag # TODO(salv-orlando): Asynchronous sync for gateway device status tenant_id = context.tenant_id if not context.is_admin else None nsx_statuses = nsx_utils.get_nsx_device_statuses(self.cluster, tenant_id) # Update statuses in database with db_api.context_manager.writer.using(context): for device in devices: new_status = nsx_statuses.get(device['nsx_id']) if new_status: device['status'] = new_status return devices def create_gateway_device(self, context, gateway_device): # NOTE(salv-orlando): client-certificate will not be stored # in the database device_data = gateway_device[networkgw.DEVICE_RESOURCE_NAME] client_certificate = device_data.pop('client_certificate') 
gw_device = super(NsxPluginV2, self).create_gateway_device( context, gateway_device) # DB operation was successful, perform NSX operation gw_device['status'] = self.create_gateway_device_handler( context, gw_device, client_certificate) return gw_device def update_gateway_device(self, context, device_id, gateway_device): # NOTE(salv-orlando): client-certificate will not be stored # in the database client_certificate = ( gateway_device[networkgw.DEVICE_RESOURCE_NAME].pop( 'client_certificate', None)) # Retrive current state from DB in case a rollback should be needed old_gw_device_data = super(NsxPluginV2, self).get_gateway_device( context, device_id, include_nsx_id=True) gw_device = super(NsxPluginV2, self).update_gateway_device( context, device_id, gateway_device, include_nsx_id=True) # DB operation was successful, perform NSX operation gw_device['status'] = self.update_gateway_device_handler( context, gw_device, old_gw_device_data, client_certificate) gw_device.pop('nsx_id') return gw_device def delete_gateway_device(self, context, device_id): nsx_device_id = self._get_nsx_device_id(context, device_id) super(NsxPluginV2, self).delete_gateway_device( context, device_id) # DB operation was successful, perform NSX operation # TODO(salv-orlando): State consistency with neutron DB # should be ensured even in case of backend failures try: l2gwlib.delete_gateway_device(self.cluster, nsx_device_id) except n_exc.NotFound: LOG.warning("Removal of gateway device: %(neutron_id)s failed " "on NSX backend (NSX id:%(nsx_id)s) because the " "NSX resource was not found", {'neutron_id': device_id, 'nsx_id': nsx_device_id}) except api_exc.NsxApiException: with excutils.save_and_reraise_exception(): # In this case a 500 should be returned LOG.exception("Removal of gateway device: %(neutron_id)s " "failed on NSX backend (NSX id:%(nsx_id)s). 
" "Neutron and NSX states have diverged.", {'neutron_id': device_id, 'nsx_id': nsx_device_id}) def create_security_group(self, context, security_group, default_sg=False): """Create security group. If default_sg is true that means we are creating a default security group and we don't need to check if one exists. """ s = security_group.get('security_group') tenant_id = s['tenant_id'] if not default_sg: self._ensure_default_security_group(context, tenant_id) # NOTE(salv-orlando): Pre-generating Neutron ID for security group. neutron_id = str(uuid.uuid4()) nsx_secgroup = secgrouplib.create_security_profile( self.cluster, tenant_id, neutron_id, s) with db_api.context_manager.writer.using(context): s['id'] = neutron_id sec_group = super(NsxPluginV2, self).create_security_group( context, security_group, default_sg) context.session.flush() # Add mapping between neutron and nsx identifiers nsx_db.add_neutron_nsx_security_group_mapping( context.session, neutron_id, nsx_secgroup['uuid']) return sec_group def update_security_group(self, context, secgroup_id, security_group): secgroup = (super(NsxPluginV2, self). update_security_group(context, secgroup_id, security_group)) if ('name' in security_group['security_group'] and secgroup['name'] != 'default'): nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( context.session, self.cluster, secgroup_id) try: name = security_group['security_group']['name'] secgrouplib.update_security_profile( self.cluster, nsx_sec_profile_id, name) except (n_exc.NotFound, api_exc.NsxApiException) as e: # Reverting the DB change is not really worthwhile # for a mismatch between names. It's the rules that # we care about. LOG.error('Error while updating security profile ' '%(uuid)s with name %(name)s: %(error)s.', {'uuid': secgroup_id, 'name': name, 'error': e}) return secgroup def delete_security_group(self, context, security_group_id): """Delete a security group. :param security_group_id: security group rule to remove. 
""" with db_api.context_manager.writer.using(context): security_group = super(NsxPluginV2, self).get_security_group( context, security_group_id) if not security_group: raise ext_sg.SecurityGroupNotFound(id=security_group_id) if security_group['name'] == 'default' and not context.is_admin: raise ext_sg.SecurityGroupCannotRemoveDefault() filters = {'security_group_id': [security_group['id']]} if super(NsxPluginV2, self)._get_port_security_group_bindings( context, filters): raise ext_sg.SecurityGroupInUse(id=security_group['id']) nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( context.session, self.cluster, security_group_id) try: secgrouplib.delete_security_profile( self.cluster, nsx_sec_profile_id) except n_exc.NotFound: # The security profile was not found on the backend # do not fail in this case. LOG.warning("The NSX security profile %(sec_profile_id)s, " "associated with the Neutron security group " "%(sec_group_id)s was not found on the " "backend", {'sec_profile_id': nsx_sec_profile_id, 'sec_group_id': security_group_id}) except api_exc.NsxApiException: # Raise and fail the operation, as there is a problem which # prevented the sec group from being removed from the backend LOG.exception("An exception occurred while removing the " "NSX security profile %(sec_profile_id)s, " "associated with Netron security group " "%(sec_group_id)s", {'sec_profile_id': nsx_sec_profile_id, 'sec_group_id': security_group_id}) raise nsx_exc.NsxPluginException( _("Unable to remove security group %s from backend"), security_group['id']) return super(NsxPluginV2, self).delete_security_group( context, security_group_id) def _validate_security_group_rules(self, context, rules): for rule in rules['security_group_rules']: r = rule.get('security_group_rule') port_based_proto = (self._get_ip_proto_number(r['protocol']) in constants.IP_PROTOCOL_MAP.values()) if (not port_based_proto and (r['port_range_min'] is not None or r['port_range_max'] is not None)): msg = (_("Port values 
not valid for " "protocol: %s") % r['protocol']) raise n_exc.BadRequest(resource='security_group_rule', msg=msg) return super(NsxPluginV2, self)._validate_security_group_rules(context, rules) def create_security_group_rule(self, context, security_group_rule): """Create a single security group rule.""" bulk_rule = {'security_group_rules': [security_group_rule]} return self.create_security_group_rule_bulk(context, bulk_rule)[0] def create_security_group_rule_bulk(self, context, security_group_rules): """Create security group rules. :param security_group_rule: list of rules to create """ s = security_group_rules.get('security_group_rules') # TODO(arosen) is there anyway we could avoid having the update of # the security group rules in nsx outside of this transaction? with db_api.context_manager.writer.using(context): security_group_id = self._validate_security_group_rules( context, security_group_rules) # Check to make sure security group exists security_group = super(NsxPluginV2, self).get_security_group( context, security_group_id) if not security_group: raise ext_sg.SecurityGroupNotFound(id=security_group_id) # Check for duplicate rules self._check_for_duplicate_rules(context, s) # gather all the existing security group rules since we need all # of them to PUT to NSX. existing_rules = self.get_security_group_rules( context, {'security_group_id': [security_group['id']]}) combined_rules = sg_utils.merge_security_group_rules_with_current( context.session, self.cluster, s, existing_rules) nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( context.session, self.cluster, security_group_id) secgrouplib.update_security_group_rules(self.cluster, nsx_sec_profile_id, combined_rules) return super( NsxPluginV2, self).create_security_group_rule_bulk_native( context, security_group_rules) def delete_security_group_rule(self, context, sgrid): """Delete a security group rule :param sgrid: security group id to remove. 
""" with db_api.context_manager.writer.using(context): # determine security profile id security_group_rule = ( super(NsxPluginV2, self).get_security_group_rule( context, sgrid)) if not security_group_rule: raise ext_sg.SecurityGroupRuleNotFound(id=sgrid) sgid = security_group_rule['security_group_id'] current_rules = self.get_security_group_rules( context, {'security_group_id': [sgid]}) current_rules_nsx = sg_utils.get_security_group_rules_nsx_format( context.session, self.cluster, current_rules, True) sg_utils.remove_security_group_with_id_and_id_field( current_rules_nsx, sgrid) nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( context.session, self.cluster, sgid) secgrouplib.update_security_group_rules( self.cluster, nsx_sec_profile_id, current_rules_nsx) return super(NsxPluginV2, self).delete_security_group_rule(context, sgrid) def create_qos_queue(self, context, qos_queue, check_policy=True): q = qos_queue.get('qos_queue') self._validate_qos_queue(context, q) q['id'] = queuelib.create_lqueue(self.cluster, q) return super(NsxPluginV2, self).create_qos_queue(context, qos_queue) def delete_qos_queue(self, context, queue_id, raise_in_use=True): filters = {'queue_id': [queue_id]} queues = self._get_port_queue_bindings(context, filters) if queues: if raise_in_use: raise qos.QueueInUseByPort() else: return queuelib.delete_lqueue(self.cluster, queue_id) return super(NsxPluginV2, self).delete_qos_queue(context, queue_id) vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/0000775000175100017510000000000013244524600020735 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/md_proxy.py0000666000175100017510000007407313244523413023166 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import hmac import eventlet import netaddr from neutron_lib import constants from neutron_lib import context as neutron_context from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx_v.vshield import ( nsxv_loadbalancer as nsxv_lb) from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.services.lbaas.nsx_v import lbaas_common METADATA_POOL_NAME = 'MDSrvPool' METADATA_VSE_NAME = 'MdSrv' METADATA_IP_ADDR = '169.254.169.254' METADATA_TCP_PORT = 80 METADATA_HTTPS_PORT = 443 METADATA_HTTPS_VIP_PORT = 8775 INTERNAL_SUBNET = '169.254.128.0/17' MAX_INIT_THREADS = 3 NET_WAIT_INTERVAL = 240 NET_CHECK_INTERVAL = 10 EDGE_WAIT_INTERVAL = 900 EDGE_CHECK_INTERVAL = 10 LOG = logging.getLogger(__name__) DEFAULT_EDGE_FIREWALL_RULE = { 'name': 'VSERule', 'enabled': True, 'action': 'allow', 'source_vnic_groups': ['vse']} def get_router_fw_rules(): # build the allowed destination ports list int_ports = [METADATA_TCP_PORT, METADATA_HTTPS_PORT, METADATA_HTTPS_VIP_PORT] str_ports = [str(p) for p in int_ports] # the list of ports can be extended by 
configuration if cfg.CONF.nsxv.metadata_service_allowed_ports: str_metadata_ports = [str(p) for p in cfg.CONF.nsxv.metadata_service_allowed_ports] str_ports = str_ports + str_metadata_ports separator = ',' dest_ports = separator.join(str_ports) fw_rules = [ DEFAULT_EDGE_FIREWALL_RULE, { 'name': 'MDServiceIP', 'enabled': True, 'action': 'allow', 'destination_ip_address': [METADATA_IP_ADDR], 'protocol': 'tcp', 'destination_port': dest_ports }, { 'name': 'MDInterEdgeNet', 'enabled': True, 'action': 'deny', 'destination_ip_address': [INTERNAL_SUBNET] }] return fw_rules def get_db_internal_edge_ips(context, az_name): ip_list = [] edge_list = nsxv_db.get_nsxv_internal_edges_by_purpose( context.session, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE) if edge_list: # Take only the edges on this availability zone ip_list = [edge['ext_ip_address'] for edge in edge_list if nsxv_db.get_router_availability_zone( context.session, edge['router_id']) == az_name] return ip_list class NsxVMetadataProxyHandler(object): """A metadata proxy handler for a specific availability zone""" def __init__(self, nsxv_plugin, availability_zone): self.nsxv_plugin = nsxv_plugin context = neutron_context.get_admin_context() self.az = availability_zone # Init cannot run concurrently on multiple nodes with locking.LockManager.get_lock('nsx-metadata-init'): # if the core plugin is the TVD - we need to add project # plugin mapping for the internal project core_plugin = directory.get_plugin() if core_plugin.is_tvd_plugin(): try: core_plugin.create_project_plugin_map( context, {'project_plugin_map': {'plugin': projectpluginmap.NsxPlugins.NSX_V, 'project': nsxv_constants.INTERNAL_TENANT_ID}}, internal=True) except projectpluginmap.ProjectPluginAlreadyExists: pass self.internal_net, self.internal_subnet = ( self._get_internal_network_and_subnet(context)) self.proxy_edge_ips = self._get_proxy_edges(context) def _create_metadata_internal_network(self, context, cidr): # Neutron requires a network to have 
some tenant_id tenant_id = nsxv_constants.INTERNAL_TENANT_ID net_name = 'inter-edge-net' if not self.az.is_default(): net_name = '%s-%s' % (net_name, self.az.name) net_data = {'network': {'name': net_name, 'admin_state_up': True, 'port_security_enabled': False, 'shared': False, 'availability_zone_hints': [self.az.name], 'tenant_id': tenant_id}} net = self.nsxv_plugin.create_network(context, net_data) subnet_data = {'subnet': {'cidr': cidr, 'name': 'inter-edge-subnet', 'gateway_ip': constants.ATTR_NOT_SPECIFIED, 'allocation_pools': constants.ATTR_NOT_SPECIFIED, 'ip_version': 4, 'dns_nameservers': constants.ATTR_NOT_SPECIFIED, 'host_routes': constants.ATTR_NOT_SPECIFIED, 'enable_dhcp': False, 'network_id': net['id'], 'tenant_id': tenant_id}} subnet = self.nsxv_plugin.create_subnet( context, subnet_data) return net['id'], subnet['id'] def _get_internal_net_by_az(self, context): # Get the internal network for the current az int_net = nsxv_db.get_nsxv_internal_network_for_az( context.session, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE, self.az.name) if int_net: return int_net['network_id'] def _get_internal_network_and_subnet(self, context): # Try to find internal net, internal subnet. 
If not found, create new internal_net = self._get_internal_net_by_az(context) internal_subnet = None if internal_net: internal_subnet = self.nsxv_plugin.get_subnets( context, fields=['id'], filters={'network_id': [internal_net]})[0]['id'] if internal_net is None or internal_subnet is None: if cfg.CONF.nsxv.metadata_initializer: # Couldn't find net, subnet - create new try: internal_net, internal_subnet = ( self._create_metadata_internal_network( context, INTERNAL_SUBNET)) except Exception as e: nsxv_db.delete_nsxv_internal_network( context.session, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE, internal_net) # if network is created, clean up if internal_net: self.nsxv_plugin.delete_network(context, internal_net) error = (_("Exception %s while creating internal " "network for metadata service") % e) LOG.exception(error) raise nsxv_exc.NsxPluginException(err_msg=error) # Update the new network_id in DB nsxv_db.create_nsxv_internal_network( context.session, nsxv_constants.INTER_EDGE_PURPOSE, self.az.name, internal_net) else: error = _('Metadata initialization is incomplete on ' 'initializer node') raise nsxv_exc.NsxPluginException(err_msg=error) return internal_net, internal_subnet def _get_edge_internal_ip(self, context, rtr_id): filters = { 'network_id': [self.internal_net], 'device_id': [rtr_id]} ports = self.nsxv_plugin.get_ports(context, filters=filters) if ports: return ports[0]['fixed_ips'][0]['ip_address'] else: LOG.error("No port found for metadata for %s", rtr_id) def _get_edge_rtr_id_by_ext_ip(self, context, edge_ip): rtr_list = nsxv_db.get_nsxv_internal_edge( context.session, edge_ip) if rtr_list: return rtr_list[0]['router_id'] def _get_edge_id_by_rtr_id(self, context, rtr_id): binding = nsxv_db.get_nsxv_router_binding( context.session, rtr_id) if binding: return binding['edge_id'] def _get_proxy_edges(self, context): proxy_edge_ips = [] db_edge_ips = get_db_internal_edge_ips(context, self.az.name) if len(db_edge_ips) > 
len(self.az.mgt_net_proxy_ips): error = (_('Number of configured metadata proxy IPs is smaller ' 'than number of Edges which are already provisioned ' 'for availability zone %s'), self.az.name) raise nsxv_exc.NsxPluginException(err_msg=error) pool = eventlet.GreenPool(min(MAX_INIT_THREADS, len(self.az.mgt_net_proxy_ips))) # Edge IPs that exist in both lists have to be validated that their # Edge appliance settings are valid for edge_inner_ip in pool.imap( self._setup_proxy_edge_route_and_connectivity, list(set(db_edge_ips) & set(self.az.mgt_net_proxy_ips))): proxy_edge_ips.append(edge_inner_ip) # Edges that exist only in the CFG list, should be paired with Edges # that exist only in the DB list. The existing Edge from the list will # be reconfigured to match the new config edge_to_convert_ips = ( list(set(db_edge_ips) - set(self.az.mgt_net_proxy_ips))) edge_ip_to_set = ( list(set(self.az.mgt_net_proxy_ips) - set(db_edge_ips))) if edge_to_convert_ips: if cfg.CONF.nsxv.metadata_initializer: for edge_inner_ip in pool.imap( self._setup_proxy_edge_external_interface_ip, zip(edge_to_convert_ips, edge_ip_to_set)): proxy_edge_ips.append(edge_inner_ip) else: error = _('Metadata initialization is incomplete on ' 'initializer node') raise nsxv_exc.NsxPluginException(err_msg=error) # Edges that exist in the CFG list but do not have a matching DB # element will be created. 
remaining_cfg_ips = edge_ip_to_set[len(edge_to_convert_ips):] if remaining_cfg_ips: if cfg.CONF.nsxv.metadata_initializer: for edge_inner_ip in pool.imap( self._setup_new_proxy_edge, remaining_cfg_ips): proxy_edge_ips.append(edge_inner_ip) pool.waitall() else: error = _('Metadata initialization is incomplete on ' 'initializer node') raise nsxv_exc.NsxPluginException(err_msg=error) return proxy_edge_ips def _setup_proxy_edge_route_and_connectivity(self, rtr_ext_ip, rtr_id=None, edge_id=None): # Use separate context per each as we use this in tread context context = neutron_context.get_admin_context() if not rtr_id: rtr_id = self._get_edge_rtr_id_by_ext_ip(context, rtr_ext_ip) if not edge_id: edge_id = self._get_edge_id_by_rtr_id(context, rtr_id) if not rtr_id or not edge_id: # log this error and return without the ip, but don't fail LOG.error("Failed find edge for router %(rtr_id)s with ip " "%(rtr_ext_ip)s", {'rtr_id': rtr_id, 'rtr_ext_ip': rtr_ext_ip}) return # Read and validate DGW. If different, replace with new value try: # This may fail if the edge was deleted on backend h, routes = self.nsxv_plugin.nsx_v.vcns.get_routes(edge_id) except exceptions.ResourceNotFound as e: # log this error and return without the ip, but don't fail LOG.error("Failed to get routes for metadata proxy edge " "%(edge)s: %(err)s", {'edge': edge_id, 'err': e}) return dgw = routes.get('defaultRoute', {}).get('gatewayAddress') if dgw != self.az.mgt_net_default_gateway: if cfg.CONF.nsxv.metadata_initializer: self.nsxv_plugin._update_routes( context, rtr_id, self.az.mgt_net_default_gateway) else: error = _('Metadata initialization is incomplete on ' 'initializer node') raise nsxv_exc.NsxPluginException(err_msg=error) # Read and validate connectivity h, if_data = self.nsxv_plugin.nsx_v.get_interface( edge_id, vcns_const.EXTERNAL_VNIC_INDEX) cur_ip = if_data.get('addressGroups', {} ).get('addressGroups', {} )[0].get('primaryAddress') cur_pgroup = if_data['portgroupId'] if (if_data and 
cur_pgroup != self.az.mgt_net_moid or cur_ip != rtr_ext_ip): if cfg.CONF.nsxv.metadata_initializer: self.nsxv_plugin.nsx_v.update_interface( rtr_id, edge_id, vcns_const.EXTERNAL_VNIC_INDEX, self.az.mgt_net_moid, address=rtr_ext_ip, netmask=self.az.mgt_net_proxy_netmask, secondary=[]) else: error = _('Metadata initialization is incomplete on ' 'initializer node') raise nsxv_exc.NsxPluginException(err_msg=error) # Read and validate LB pool member configuration # When the Nova IP address is changed in the ini file, we should apply # this change to the LB pool lb_obj = nsxv_lb.NsxvLoadbalancer.get_loadbalancer( self.nsxv_plugin.nsx_v.vcns, edge_id) vs = lb_obj.virtual_servers.get(METADATA_VSE_NAME) update_md_proxy = False if vs: md_members = {member.payload['ipAddress']: member.payload['name'] for member in vs.default_pool.members.values()} if len(cfg.CONF.nsxv.nova_metadata_ips) == len(md_members): m_ips = md_members.keys() m_to_convert = (list(set(m_ips) - set(cfg.CONF.nsxv.nova_metadata_ips))) m_ip_to_set = (list(set(cfg.CONF.nsxv.nova_metadata_ips) - set(m_ips))) if m_to_convert or m_ip_to_set: update_md_proxy = True for m_ip in m_to_convert: m_name = md_members[m_ip] vs.default_pool.members[m_name].payload['ipAddress'] = ( m_ip_to_set.pop()) else: error = _('Number of metadata members should not change') raise nsxv_exc.NsxPluginException(err_msg=error) try: # This may fail if the edge is powered off right now if update_md_proxy: lb_obj.submit_to_backend(self.nsxv_plugin.nsx_v.vcns, edge_id) except exceptions.RequestBad as e: # log the error and continue LOG.error("Failed to update load balancer on metadata " "proxy edge %(edge)s: %(err)s", {'edge': edge_id, 'err': e}) edge_ip = self._get_edge_internal_ip(context, rtr_id) if edge_ip: return edge_ip def _setup_proxy_edge_external_interface_ip(self, rtr_ext_ips): # Use separate context per each as we use this in tread context context = neutron_context.get_admin_context() rtr_old_ext_ip, rtr_new_ext_ip = rtr_ext_ips 
rtr_id = self._get_edge_rtr_id_by_ext_ip(context, rtr_old_ext_ip) edge_id = self._get_edge_id_by_rtr_id(context, rtr_id) # Replace DB entry as we cannot update the table PK nsxv_db.delete_nsxv_internal_edge(context.session, rtr_old_ext_ip) edge_ip = self._setup_proxy_edge_route_and_connectivity( rtr_new_ext_ip, rtr_id, edge_id) nsxv_db.create_nsxv_internal_edge( context.session, rtr_new_ext_ip, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE, rtr_id) if edge_ip: return edge_ip def _setup_new_proxy_edge(self, rtr_ext_ip): # Use separate context per each as we use this in tread context context = neutron_context.get_admin_context() rtr_id = None try: rtr_name = 'metadata_proxy_router' if not self.az.is_default(): rtr_name = '%s-%s' % (rtr_name, self.az.name) router_data = { 'router': { 'name': rtr_name, 'admin_state_up': True, 'router_type': 'exclusive', 'availability_zone_hints': [self.az.name], 'tenant_id': nsxv_constants.INTERNAL_TENANT_ID}} rtr = self.nsxv_plugin.create_router( context, router_data, allow_metadata=False) rtr_id = rtr['id'] edge_id = self._get_edge_id_by_rtr_id(context, rtr_id) if not edge_id: LOG.error('No edge create for router - %s', rtr_id) if rtr_id: self.nsxv_plugin.delete_router(context, rtr_id) return self.nsxv_plugin.nsx_v.update_interface( rtr['id'], edge_id, vcns_const.EXTERNAL_VNIC_INDEX, self.az.mgt_net_moid, address=rtr_ext_ip, netmask=self.az.mgt_net_proxy_netmask, secondary=[]) port_data = { 'port': { 'network_id': self.internal_net, 'name': None, 'admin_state_up': True, 'device_id': rtr_id, 'device_owner': (constants.DEVICE_OWNER_NETWORK_PREFIX + 'md_interface'), 'fixed_ips': constants.ATTR_NOT_SPECIFIED, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'port_security_enabled': False, 'tenant_id': nsxv_constants.INTERNAL_TENANT_ID}} port = self.nsxv_plugin.base_create_port(context, port_data) address_groups = self._get_address_groups( context, self.internal_net, rtr_id, is_proxy=True) edge_ip = port['fixed_ips'][0]['ip_address'] 
with locking.LockManager.get_lock(edge_id): edge_utils.update_internal_interface( self.nsxv_plugin.nsx_v, context, rtr_id, self.internal_net, address_groups) self._setup_metadata_lb(rtr_id, port['fixed_ips'][0]['ip_address'], cfg.CONF.nsxv.nova_metadata_port, cfg.CONF.nsxv.nova_metadata_port, cfg.CONF.nsxv.nova_metadata_ips, proxy_lb=True) firewall_rules = [ DEFAULT_EDGE_FIREWALL_RULE, { 'action': 'allow', 'enabled': True, 'source_ip_address': [INTERNAL_SUBNET]}] edge_utils.update_firewall( self.nsxv_plugin.nsx_v, context, rtr_id, {'firewall_rule_list': firewall_rules}, allow_external=False) if self.az.mgt_net_default_gateway: self.nsxv_plugin._update_routes( context, rtr_id, self.az.mgt_net_default_gateway) nsxv_db.create_nsxv_internal_edge( context.session, rtr_ext_ip, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE, rtr_id) return edge_ip except Exception as e: LOG.exception("Exception %s while creating internal edge " "for metadata service", e) ports = self.nsxv_plugin.get_ports( context, filters={'device_id': [rtr_id]}) for port in ports: self.nsxv_plugin.delete_port(context, port['id'], l3_port_check=True, nw_gw_port_check=True, allow_delete_internal=True) nsxv_db.delete_nsxv_internal_edge( context.session, rtr_ext_ip) if rtr_id: self.nsxv_plugin.delete_router(context, rtr_id) def _get_address_groups(self, context, network_id, device_id, is_proxy): filters = {'network_id': [network_id], 'device_id': [device_id]} ports = self.nsxv_plugin.get_ports(context, filters=filters) subnets = self.nsxv_plugin.get_subnets(context, filters=filters) address_groups = [] for subnet in subnets: address_group = {} net = netaddr.IPNetwork(subnet['cidr']) address_group['subnetMask'] = str(net.netmask) address_group['subnetPrefixLength'] = str(net.prefixlen) for port in ports: fixed_ips = port['fixed_ips'] for fip in fixed_ips: s_id = fip['subnet_id'] ip_addr = fip['ip_address'] if s_id == subnet['id'] and netaddr.valid_ipv4(ip_addr): address_group['primaryAddress'] = ip_addr 
break # For Edge appliances which aren't the metadata proxy Edge # we add the metadata IP address if not is_proxy and network_id == self.internal_net: address_group['secondaryAddresses'] = { 'type': 'secondary_addresses', 'ipAddress': [METADATA_IP_ADDR]} address_groups.append(address_group) return address_groups def _create_ssl_cert(self, edge_id=None): # Create a self signed certificate in the backend if both Cert details # and private key are not supplied in nsx.ini if (not cfg.CONF.nsxv.metadata_nova_client_cert and not cfg.CONF.nsxv.metadata_nova_client_priv_key): h = self.nsxv_plugin.nsx_v.vcns.create_csr(edge_id)[0] # Extract the CSR ID from header csr_id = lbaas_common.extract_resource_id(h['location']) # Create a self signed certificate cert = self.nsxv_plugin.nsx_v.vcns.create_csr_cert(csr_id)[1] cert_id = cert['objectId'] else: # Raise an error if either the Cert path or the private key is not # configured error = None if not cfg.CONF.nsxv.metadata_nova_client_cert: error = _('Metadata certificate path not configured') elif not cfg.CONF.nsxv.metadata_nova_client_priv_key: error = _('Metadata client private key not configured') if error: raise nsxv_exc.NsxPluginException(err_msg=error) pem_encoding = utils.read_file( cfg.CONF.nsxv.metadata_nova_client_cert) priv_key = utils.read_file( cfg.CONF.nsxv.metadata_nova_client_priv_key) request = { 'pemEncoding': pem_encoding, 'privateKey': priv_key} cert = self.nsxv_plugin.nsx_v.vcns.upload_edge_certificate( edge_id, request)[1] cert_id = cert.get('certificates')[0]['objectId'] return cert_id def _setup_metadata_lb(self, rtr_id, vip, v_port, s_port, member_ips, proxy_lb=False, context=None): if context is None: context = neutron_context.get_admin_context() edge_id = self._get_edge_id_by_rtr_id(context, rtr_id) LOG.debug('Setting up Edge device %s', edge_id) lb_obj = nsxv_lb.NsxvLoadbalancer() protocol = 'HTTP' ssl_pass_through = False cert_id = None # Set protocol to HTTPS with default port of 443 if 
metadata_insecure # is set to False. if not cfg.CONF.nsxv.metadata_insecure: protocol = 'HTTPS' if proxy_lb: v_port = METADATA_HTTPS_VIP_PORT else: v_port = METADATA_HTTPS_PORT # Create the certificate on the backend cert_id = self._create_ssl_cert(edge_id) ssl_pass_through = proxy_lb mon_type = protocol if proxy_lb else 'tcp' # Create virtual server virt_srvr = nsxv_lb.NsxvLBVirtualServer( name=METADATA_VSE_NAME, ip_address=vip, protocol=protocol, port=v_port) # For router Edge, we add X-LB-Proxy-ID header if not proxy_lb: md_app_rule = nsxv_lb.NsxvLBAppRule( 'insert-mdp', 'reqadd X-Metadata-Provider:' + edge_id) virt_srvr.add_app_rule(md_app_rule) # When shared proxy is configured, insert authentication string if cfg.CONF.nsxv.metadata_shared_secret: signature = hmac.new( cfg.CONF.nsxv.metadata_shared_secret, edge_id, hashlib.sha256).hexdigest() sign_app_rule = nsxv_lb.NsxvLBAppRule( 'insert-auth', 'reqadd X-Metadata-Provider-Signature:' + signature) virt_srvr.add_app_rule(sign_app_rule) # Create app profile # XFF is inserted in router LBs app_profile = nsxv_lb.NsxvLBAppProfile( name='MDSrvProxy', template=protocol, server_ssl_enabled=not cfg.CONF.nsxv.metadata_insecure, ssl_pass_through=ssl_pass_through, insert_xff=not proxy_lb, client_ssl_cert=cert_id) virt_srvr.set_app_profile(app_profile) # Create pool, members and monitor pool = nsxv_lb.NsxvLBPool( name=METADATA_POOL_NAME) monitor = nsxv_lb.NsxvLBMonitor(name='MDSrvMon', mon_type=mon_type.lower()) pool.add_monitor(monitor) i = 0 for member_ip in member_ips: i += 1 member = nsxv_lb.NsxvLBPoolMember( name='Member-%d' % i, ip_address=member_ip, port=s_port, monitor_port=s_port) pool.add_member(member) virt_srvr.set_default_pool(pool) lb_obj.add_virtual_server(virt_srvr) lb_obj.submit_to_backend(self.nsxv_plugin.nsx_v.vcns, edge_id) def configure_router_edge(self, context, rtr_id): ctx = context.elevated() # Connect router interface to inter-edge network port_data = { 'port': { 'network_id': self.internal_net, 
'name': None, 'admin_state_up': True, 'device_id': rtr_id, 'device_owner': constants.DEVICE_OWNER_ROUTER_GW, 'fixed_ips': constants.ATTR_NOT_SPECIFIED, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'port_security_enabled': False, 'tenant_id': nsxv_constants.INTERNAL_TENANT_ID}} self.nsxv_plugin.base_create_port(ctx, port_data) address_groups = self._get_address_groups( ctx, self.internal_net, rtr_id, is_proxy=False) edge_utils.update_internal_interface( self.nsxv_plugin.nsx_v, context, rtr_id, self.internal_net, address_groups=address_groups) self._setup_metadata_lb(rtr_id, METADATA_IP_ADDR, METADATA_TCP_PORT, cfg.CONF.nsxv.nova_metadata_port, self.proxy_edge_ips, proxy_lb=False, context=context) def cleanup_router_edge(self, context, rtr_id, warn=False): filters = { 'network_id': [self.internal_net], 'device_id': [rtr_id]} ctx = context.elevated() ports = self.nsxv_plugin.get_ports(ctx, filters=filters) if ports: if warn: LOG.warning("cleanup_router_edge found port %(port)s for " "router %(router)s - deleting it now.", {'port': ports[0]['id'], 'router': rtr_id}) try: self.nsxv_plugin.delete_port( ctx, ports[0]['id'], l3_port_check=False) except Exception as e: LOG.error("Failed to delete md_proxy port %(port)s: " "%(e)s", {'port': ports[0]['id'], 'e': e}) def is_md_subnet(self, subnet_id): return self.internal_subnet == subnet_id vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/availability_zones.py0000666000175100017510000002553513244523345025220 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from vmware_nsx._i18n import _ from vmware_nsx.common import availability_zones as common_az from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc DEFAULT_NAME = common_az.DEFAULT_NAME class NsxVAvailabilityZone(common_az.ConfiguredAvailabilityZone): def init_from_config_line(self, config_line): values = config_line.split(':') if len(values) < 4 or len(values) > 5: raise nsx_exc.NsxInvalidConfiguration( opt_name="availability_zones", opt_value=config_line, reason=_("Expected 4 or 5 values per zone")) self.resource_pool = values[1] self.datastore_id = values[2] # validate the edge_ha if values[3].lower() == "true": self.edge_ha = True elif values[3].lower() == "false": self.edge_ha = False else: raise nsx_exc.NsxInvalidConfiguration( opt_name="availability_zones", opt_value=config_line, reason=_("Expected the 4th value to be true/false")) # HA datastore id is relevant only with edge_ha if not self.edge_ha and len(values) == 5: raise nsx_exc.NsxInvalidConfiguration( opt_name="availability_zones", opt_value=config_line, reason=_("Expected HA datastore ID only when edge_ha is " "enabled")) self.ha_datastore_id = values[4] if len(values) == 5 else None # Some parameters are not supported in this format. # using the global ones instead. 
self.ha_placement_random = cfg.CONF.nsxv.ha_placement_random self.datacenter_moid = cfg.CONF.nsxv.datacenter_moid self.backup_edge_pool = cfg.CONF.nsxv.backup_edge_pool self.external_network = cfg.CONF.nsxv.external_network self.vdn_scope_id = cfg.CONF.nsxv.vdn_scope_id self.dvs_id = cfg.CONF.nsxv.dvs_id self.edge_host_groups = cfg.CONF.nsxv.edge_host_groups self.exclusive_dhcp_edge = cfg.CONF.nsxv.exclusive_dhcp_edge self.bind_floatingip_to_all_interfaces = ( cfg.CONF.nsxv.bind_floatingip_to_all_interfaces) # No support for metadata per az self.az_metadata_support = False self.mgt_net_moid = None self.mgt_net_proxy_ips = [] self.mgt_net_proxy_netmask = None self.mgt_net_default_gateway = None def init_from_config_section(self, az_name): az_info = config.get_nsxv_az_opts(az_name) self.resource_pool = az_info.get('resource_pool_id') if not self.resource_pool: raise nsx_exc.NsxInvalidConfiguration( opt_name="resource_pool_id", opt_value='None', reason=(_("resource_pool_id for availability zone %s " "must be defined") % az_name)) self.datastore_id = az_info.get('datastore_id') if not self.datastore_id: raise nsx_exc.NsxInvalidConfiguration( opt_name="datastore_id", opt_value='None', reason=(_("datastore_id for availability zone %s " "must be defined") % az_name)) self.edge_ha = az_info.get('edge_ha', False) # The HA datastore can be empty self.ha_datastore_id = (az_info.get('ha_datastore_id') if self.edge_ha else None) if self.ha_datastore_id and not self.edge_ha: raise nsx_exc.NsxInvalidConfiguration( opt_name="ha_datastore_id", opt_value=self.ha_datastore_id, reason=_("Expected HA datastore ID only when edge_ha is " "enabled for availability zone %s") % az_name) # The optional parameters will get the global values if not # defined for this AZ self.ha_placement_random = az_info.get('ha_placement_random') if self.ha_placement_random is None: self.ha_placement_random = ( cfg.CONF.nsxv.ha_placement_random) self.datacenter_moid = az_info.get('datacenter_moid') if not 
self.datacenter_moid: self.datacenter_moid = cfg.CONF.nsxv.datacenter_moid self.backup_edge_pool = az_info.get('backup_edge_pool', []) if not self.backup_edge_pool: self.backup_edge_pool = cfg.CONF.nsxv.backup_edge_pool self.external_network = az_info.get('external_network') if not self.external_network: self.external_network = cfg.CONF.nsxv.external_network self.vdn_scope_id = az_info.get('vdn_scope_id') if not self.vdn_scope_id: self.vdn_scope_id = cfg.CONF.nsxv.vdn_scope_id self.dvs_id = az_info.get('dvs_id') if not self.dvs_id: self.dvs_id = cfg.CONF.nsxv.dvs_id self.edge_host_groups = az_info.get('edge_host_groups', []) if not self.edge_host_groups: self.edge_host_groups = cfg.CONF.nsxv.edge_host_groups self.exclusive_dhcp_edge = az_info.get('exclusive_dhcp_edge', False) self.bind_floatingip_to_all_interfaces = az_info.get( 'bind_floatingip_to_all_interfaces', False) # Support for metadata per az only if configured, and different # from the global one self.mgt_net_proxy_ips = az_info.get('mgt_net_proxy_ips') if self.mgt_net_proxy_ips: # make sure there are no over lapping ips with the # global configuration if (set(self.mgt_net_proxy_ips) & set(cfg.CONF.nsxv.mgt_net_proxy_ips)): raise nsx_exc.NsxInvalidConfiguration( opt_name="mgt_net_proxy_ips", opt_value='None', reason=(_("mgt_net_proxy_ips for availability zone " "%s must be different from global one") % az_name)) self.az_metadata_support = True self.mgt_net_moid = az_info.get('mgt_net_moid') if not self.mgt_net_moid: self.mgt_net_moid = cfg.CONF.nsxv.mgt_net_moid self.mgt_net_proxy_netmask = az_info.get( 'mgt_net_proxy_netmask') if not self.mgt_net_proxy_netmask: self.mgt_net_proxy_netmask = ( cfg.CONF.nsxv.mgt_net_proxy_netmask) self.mgt_net_default_gateway = az_info.get( 'mgt_net_default_gateway') if not self.mgt_net_default_gateway: self.mgt_net_default_gateway = ( cfg.CONF.nsxv.mgt_net_default_gateway) else: self.az_metadata_support = False self.mgt_net_moid = None self.mgt_net_proxy_ips = [] 
self.mgt_net_proxy_netmask = None self.mgt_net_default_gateway = None def init_default_az(self): # use the default configuration self.resource_pool = cfg.CONF.nsxv.resource_pool_id self.datastore_id = cfg.CONF.nsxv.datastore_id self.edge_ha = cfg.CONF.nsxv.edge_ha self.ha_datastore_id = cfg.CONF.nsxv.ha_datastore_id self.ha_placement_random = cfg.CONF.nsxv.ha_placement_random self.datacenter_moid = cfg.CONF.nsxv.datacenter_moid self.backup_edge_pool = cfg.CONF.nsxv.backup_edge_pool self.az_metadata_support = True self.mgt_net_moid = cfg.CONF.nsxv.mgt_net_moid self.mgt_net_proxy_ips = cfg.CONF.nsxv.mgt_net_proxy_ips self.mgt_net_proxy_netmask = cfg.CONF.nsxv.mgt_net_proxy_netmask self.mgt_net_default_gateway = ( cfg.CONF.nsxv.mgt_net_default_gateway) self.external_network = cfg.CONF.nsxv.external_network self.vdn_scope_id = cfg.CONF.nsxv.vdn_scope_id self.dvs_id = cfg.CONF.nsxv.dvs_id self.edge_host_groups = cfg.CONF.nsxv.edge_host_groups self.exclusive_dhcp_edge = cfg.CONF.nsxv.exclusive_dhcp_edge self.bind_floatingip_to_all_interfaces = ( cfg.CONF.nsxv.bind_floatingip_to_all_interfaces) def supports_metadata(self): # Return True if this az has it's own metadata configuration # If False - it uses the global metadata (if defined) return self.az_metadata_support class NsxVAvailabilityZones(common_az.ConfiguredAvailabilityZones): def __init__(self, use_tvd_config=False): if use_tvd_config: default_azs = cfg.CONF.nsx_tvd.nsx_v_default_availability_zones else: default_azs = cfg.CONF.default_availability_zones super(NsxVAvailabilityZones, self).__init__( cfg.CONF.nsxv.availability_zones, NsxVAvailabilityZone, default_availability_zones=default_azs) def get_inventory(self): """Return a set of relevant resources in all the availability zones """ resources = set() for az in self.list_availability_zones_objects(): if az.resource_pool: resources.add(az.resource_pool) if az.datastore_id: resources.add(az.datastore_id) if az.ha_datastore_id: resources.add(az.ha_datastore_id) 
return resources def get_unique_non_default_param(self, param_name): """Return a set of all configured values of one of az params Ignore the value of the default AZ """ resources = set() default_val = None for az in self.list_availability_zones_objects(): az_val = getattr(az, param_name) if az.is_default(): default_val = az_val elif az_val: resources.add(az_val) # remove the default value if default_val: resources.discard(default_val) return resources def get_additional_vdn_scope(self): return self.get_unique_non_default_param("vdn_scope_id") def get_additional_mgt_net(self): return self.get_unique_non_default_param("mgt_net_moid") def get_additional_ext_net(self): return self.get_unique_non_default_param("external_network") def get_additional_datacenter(self): return self.get_unique_non_default_param("datacenter_moid") def get_additional_dvs_ids(self): return self.get_unique_non_default_param("dvs_id") vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/drivers/0000775000175100017510000000000013244524600022413 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/drivers/abstract_router_driver.py0000666000175100017510000001262713244523345027562 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import six from neutron.db import l3_db from neutron.db import models_v2 from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import nsxv_constants from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield import edge_utils @six.add_metaclass(abc.ABCMeta) class RouterAbstractDriver(object): """Abstract router driver that expose API for nsxv plugin.""" @abc.abstractmethod def get_type(self): pass @abc.abstractmethod def create_router(self, context, lrouter, appliance_size=None, allow_metadata=True): pass @abc.abstractmethod def update_router(self, context, router_id, router): pass @abc.abstractmethod def delete_router(self, context, router_id): pass @abc.abstractmethod def update_routes(self, context, router_id, nexthop): pass @abc.abstractmethod def _update_router_gw_info(self, context, router_id, info): pass @abc.abstractmethod def add_router_interface(self, context, router_id, interface_info): pass @abc.abstractmethod def remove_router_interface(self, context, router_id, interface_info): pass @abc.abstractmethod def _update_edge_router(self, context, router_id): pass class RouterBaseDriver(RouterAbstractDriver): def __init__(self, plugin): self.plugin = plugin self.nsx_v = plugin.nsx_v self.edge_manager = plugin.edge_manager self.vcns = self.nsx_v.vcns self._availability_zones = nsx_az.NsxVAvailabilityZones() def _notify_after_router_edge_association(self, context, router): registry.notify(nsxv_constants.SERVICE_EDGE, events.AFTER_CREATE, self, context=context, router=router) def _notify_before_router_edge_association(self, context, router, edge_id=None): registry.notify(nsxv_constants.SERVICE_EDGE, events.BEFORE_DELETE, self, context=context, router=router, edge_id=edge_id) def _get_external_network_id_by_router(self, context, router_id): """Get router's external network 
id if it has.""" router = self.plugin.get_router(context, router_id) ports_qry = context.session.query(models_v2.Port) gw_ports = ports_qry.filter_by( device_id=router_id, device_owner=l3_db.DEVICE_OWNER_ROUTER_GW, id=router['gw_port_id']).all() if gw_ports: return gw_ports[0]['network_id'] def _get_edge_id_or_raise(self, context, router_id): edge_id = edge_utils.get_router_edge_id(context, router_id) if not edge_id: error = (_("Failed to get router %(rid)s edge Id") % {'rid': router_id}) raise nsxv_exc.NsxPluginException(err_msg=error) return edge_id def update_nat_rules(self, context, router, router_id): self.plugin._update_nat_rules(context, router, router_id) def update_router_interface_ip(self, context, router_id, port_id, int_net_id, old_ip, new_ip, subnet_mask): """Update the fixed ip of a router interface. This implementation will not work for distributed routers, and there is a different implementation in that driver class """ # get the edge-id of this router edge_id = edge_utils.get_router_edge_id(context, router_id) if not edge_id: # This may be a shared router that was not attached to an edge yet return # find out if the port is uplink or internal router = self.plugin._get_router(context, router_id) is_uplink = (port_id == router.gw_port_id) # update the edge interface configuration self.edge_manager.update_interface_addr( context, edge_id, old_ip, new_ip, subnet_mask, is_uplink=is_uplink) # Also update the nat rules if is_uplink: self.update_nat_rules(context, router, router_id) def get_router_az(self, lrouter): return self.plugin.get_router_az(lrouter) def get_router_az_and_flavor_by_id(self, context, router_id): lrouter = self.plugin.get_router(context, router_id) return (self.get_router_az(lrouter), lrouter.get('flavor_id')) def get_router_az_by_id(self, context, router_id): lrouter = self.plugin.get_router(context, router_id) return self.get_router_az(lrouter) def _update_nexthop(self, context, router_id, newnexthop): """Update the router edge on 
gateway subnet default gateway change.""" self.plugin._update_routes(context, router_id, newnexthop) vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/drivers/exclusive_router_driver.py0000666000175100017510000003447313244523345027771 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron.db import api as db_api from neutron_lib import constants as n_consts from neutron_lib.plugins import constants as plugin_const from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.drivers import ( abstract_router_driver as router_driver) from vmware_nsx.plugins.nsx_v import plugin as nsx_v from vmware_nsx.plugins.nsx_v.vshield import edge_utils LOG = logging.getLogger(__name__) class RouterExclusiveDriver(router_driver.RouterBaseDriver): def get_type(self): return "exclusive" def create_router(self, context, lrouter, appliance_size=None, allow_metadata=True): availability_zone = self.get_router_az(lrouter) self.edge_manager.create_lrouter( context, lrouter, dist=False, appliance_size=appliance_size, availability_zone=availability_zone) if allow_metadata: self.plugin.get_metadata_proxy_handler( availability_zone.name).configure_router_edge( context, lrouter['id']) def update_router(self, context, router_id, router): r = router['router'] is_routes_update = True if 'routes' in 
r else False gw_info = self.plugin._extract_external_gw(context, router, is_extract=True) super(nsx_v.NsxVPluginV2, self.plugin).update_router( context, router_id, router) if gw_info != n_consts.ATTR_NOT_SPECIFIED: self.plugin._update_router_gw_info(context, router_id, gw_info, is_routes_update) elif is_routes_update: # here is used to handle routes which tenant updates. router_db = self.plugin._get_router(context, router_id) nexthop = self.plugin._get_external_attachment_info( context, router_db)[2] with locking.LockManager.get_lock( self._get_router_edge_id(context, router_id)): self.plugin._update_subnets_and_dnat_firewall(context, router_db) self.update_routes(context, router_id, nexthop) if 'admin_state_up' in r: self.plugin._update_router_admin_state( context, router_id, self.get_type(), r['admin_state_up']) if 'name' in r: self.edge_manager.rename_lrouter(context, router_id, r['name']) if r.get('router_size'): self.edge_manager.resize_lrouter(context, router_id, r['router_size']) return self.plugin.get_router(context, router_id) def detach_router(self, context, router_id, router): LOG.debug("Detach exclusive router id %s", router_id) router_db = self.plugin._get_router(context, router_id) self._notify_before_router_edge_association(context, router_db) self.edge_manager.unbind_router_on_edge(context, router_id) if self.plugin.metadata_proxy_handler: az = self.get_router_az_by_id(context, router_id) metadata_proxy_handler = self.plugin.get_metadata_proxy_handler( az.name) if metadata_proxy_handler: metadata_proxy_handler.cleanup_router_edge(context, router_id) def _build_router_data_from_db(self, router_db, router): """Return a new dictionary with all DB & requested router attributes """ router_attr = router['router'].copy() fields = ['status', 'name', 'admin_state_up', 'tenant_id', 'id'] for field in fields: if field not in router['router']: router_attr[field] = getattr(router_db, field) return router_attr def attach_router(self, context, router_id, router, 
appliance_size=None): router_db = self.plugin._get_router(context, router_id) # Add DB attributes to the router data structure # before creating it as an exclusive router router_attr = self._build_router_data_from_db(router_db, router) allow_metadata = True if self.plugin.metadata_proxy_handler else False self.create_router(context, router_attr, allow_metadata=allow_metadata, appliance_size=appliance_size) edge_id = edge_utils.get_router_edge_id(context, router_id) LOG.debug("Exclusive router %s attached to edge %s", router_id, edge_id) # add all internal interfaces of the router on edge intf_net_ids = ( self.plugin._get_internal_network_ids_by_router(context, router_id)) with locking.LockManager.get_lock(edge_id): for network_id in intf_net_ids: address_groups = self.plugin._get_address_groups( context, router_id, network_id) edge_utils.update_internal_interface( self.nsx_v, context, router_id, network_id, address_groups, router_db.admin_state_up) # Update external interface (which also update nat rules, routes, etc) external_net_id = self._get_external_network_id_by_router(context, router_id) gw_info = None if (external_net_id): gw_info = {'network_id': external_net_id, 'enable_snat': router_db.enable_snat} self.plugin._update_router_gw_info( context, router_id, gw_info, force_update=True) def delete_router(self, context, router_id): if self.plugin.metadata_proxy_handler: # The neutron router was already deleted, so we cannot get the AZ # from it. 
Get it from the router-bindings DB edge_id, az_name = self.plugin._get_edge_id_and_az_by_rtr_id( context, router_id) md_proxy = self.plugin.get_metadata_proxy_handler(az_name) if md_proxy: md_proxy.cleanup_router_edge(context, router_id) self.edge_manager.delete_lrouter(context, router_id, dist=False) def update_routes(self, context, router_id, nexthop): with locking.LockManager.get_lock( self._get_router_edge_id(context, router_id)): self.plugin._update_routes(context, router_id, nexthop) @db_api.retry_db_errors def _update_router_gw_info(self, context, router_id, info, is_routes_update=False, force_update=False): router = self.plugin._get_router(context, router_id) org_ext_net_id = router.gw_port_id and router.gw_port.network_id org_enable_snat = router.enable_snat orgaddr, orgmask, orgnexthop = ( self.plugin._get_external_attachment_info( context, router)) super(nsx_v.NsxVPluginV2, self.plugin)._update_router_gw_info( context, router_id, info, router=router) new_ext_net_id = router.gw_port_id and router.gw_port.network_id new_enable_snat = router.enable_snat newaddr, newmask, newnexthop = ( self.plugin._get_external_attachment_info( context, router)) edge_id = self._get_router_edge_id(context, router_id) with locking.LockManager.get_lock(edge_id): if ((new_ext_net_id != org_ext_net_id or force_update) and orgnexthop): # network changed, so need to remove default gateway before # vnic can be configured LOG.debug("Delete default gateway %s", orgnexthop) edge_utils.clear_gateway(self.nsx_v, context, router_id) secondary = self.plugin._get_floatingips_by_router( context, router_id) # Update external vnic if addr or mask is changed if orgaddr != newaddr or orgmask != newmask or force_update: self.edge_manager.update_external_interface( self.nsx_v, context, router_id, new_ext_net_id, newaddr, newmask, secondary=secondary) # Update SNAT rules if ext net changed # or ext net not changed but snat is changed. 
if (new_ext_net_id != org_ext_net_id or (new_ext_net_id == org_ext_net_id and new_enable_snat != org_enable_snat) or force_update): self.plugin._update_nat_rules(context, router) if (new_ext_net_id != org_ext_net_id or new_enable_snat != org_enable_snat or is_routes_update or force_update): self.plugin._update_subnets_and_dnat_firewall(context, router) # Update static routes in all. self.plugin._update_routes(context, router_id, newnexthop) if new_ext_net_id or force_update: self._notify_after_router_edge_association(context, router) def add_router_interface(self, context, router_id, interface_info): self.plugin._check_intf_number_of_router(context, router_id) info = super(nsx_v.NsxVPluginV2, self.plugin).add_router_interface( context, router_id, interface_info) router_db = self.plugin._get_router(context, router_id) subnet = self.plugin.get_subnet(context, info['subnet_id']) network_id = subnet['network_id'] address_groups = self.plugin._get_address_groups( context, router_id, network_id) with locking.LockManager.get_lock( self._get_router_edge_id(context, router_id)): edge_utils.update_internal_interface( self.nsx_v, context, router_id, network_id, address_groups, router_db['admin_state_up']) # Update edge's firewall rules to accept subnets flows. self.plugin._update_subnets_and_dnat_firewall(context, router_db) if router_db.gw_port and router_db.enable_snat: # Update Nat rules on external edge vnic self.plugin._update_nat_rules(context, router_db) return info def remove_router_interface(self, context, router_id, interface_info): # If a loadbalancer is attached to this Edge appliance, we cannot # detach the subnet from the exclusive router. 
subnet = interface_info.get('subnet_id') if not subnet and interface_info.get('port_id'): port = self.plugin.get_port(context, interface_info['port_id']) port_subnets = [ fixed_ip['subnet_id'] for fixed_ip in port.get( 'fixed_ips', [])] subnet = port_subnets[0] if subnet and self._check_lb_on_subnet(context, subnet, router_id): error = _('Cannot delete router %(rtr)s interface while ' 'loadbalancers are provisioned on attached ' 'subnet %(subnet)s') % {'rtr': router_id, 'subnet': subnet} raise nsxv_exc.NsxPluginException(err_msg=error) info = super(nsx_v.NsxVPluginV2, self.plugin).remove_router_interface( context, router_id, interface_info) router_db = self.plugin._get_router(context, router_id) subnet = self.plugin.get_subnet(context, info['subnet_id']) network_id = subnet['network_id'] with locking.LockManager.get_lock( self._get_router_edge_id(context, router_id)): if router_db.gw_port and router_db.enable_snat: # First update nat rules self.plugin._update_nat_rules(context, router_db) ports = self.plugin._get_router_interface_ports_by_network( context, router_id, network_id) self.plugin._update_subnets_and_dnat_firewall(context, router_db) # No subnet on the network connects to the edge vnic if not ports: edge_utils.delete_interface(self.nsx_v, context, router_id, network_id, dist=False) else: address_groups = self.plugin._get_address_groups( context, router_id, network_id) edge_utils.update_internal_interface(self.nsx_v, context, router_id, network_id, address_groups) return info def _check_lb_on_subnet(self, context, subnet_id, router_id): # Check lbaas dev_owner_v1 = 'neutron:' + plugin_const.LOADBALANCER dev_owner_v2 = 'neutron:' + plugin_const.LOADBALANCERV2 filters = {'device_owner': [dev_owner_v1, dev_owner_v2], 'fixed_ips': {'subnet_id': [subnet_id]}} ports = super(nsx_v.NsxVPluginV2, self.plugin).get_ports( context, filters=filters) edge_id = self._get_router_edge_id(context, router_id) lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding_by_edge( 
context.session, edge_id) return (len(ports) >= 1) and lb_binding def _update_edge_router(self, context, router_id): router = self.plugin._get_router(context.elevated(), router_id) with locking.LockManager.get_lock( self._get_router_edge_id(context, router_id)): self.plugin._update_external_interface(context, router) self.plugin._update_nat_rules(context, router) self.plugin._update_subnets_and_dnat_firewall(context, router) def _get_router_edge_id(self, context, router_id): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) return binding['edge_id'] vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/drivers/__init__.py0000666000175100017510000000000013244523345024521 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py0000666000175100017510000013633613244523345027231 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from oslo_config import cfg from neutron.db import api as db_api from neutron.db import l3_db from neutron.db.models import l3 as l3_db_models from neutron.db import models_v2 from neutron_lib.api import validators from neutron_lib import constants from neutron_lib import exceptions as n_exc from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.db import nsxv_models from vmware_nsx.plugins.nsx_v.drivers import ( abstract_router_driver as router_driver) from vmware_nsx.plugins.nsx_v import md_proxy as nsx_v_md_proxy from vmware_nsx.plugins.nsx_v import plugin as nsx_v from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield import edge_utils LOG = logging.getLogger(__name__) class RouterSharedDriver(router_driver.RouterBaseDriver): def get_type(self): return "shared" def create_router(self, context, lrouter, appliance_size=None, allow_metadata=True): pass def _validate_no_routes(self, router): if (validators.is_attr_set(router.get('routes')) and len(router['routes']) > 0): msg = _("Cannot configure static routes on a shared router") raise n_exc.InvalidInput(error_message=msg) def update_router(self, context, router_id, router): r = router['router'] self._validate_no_routes(r) # If only the name and or description are updated. We do not need to # update the backend. 
if set(['name', 'description']) >= set(r.keys()): return super(nsx_v.NsxVPluginV2, self.plugin).update_router( context, router_id, router) edge_id = edge_utils.get_router_edge_id(context, router_id) if not edge_id: return super(nsx_v.NsxVPluginV2, self.plugin).update_router( context, router_id, router) else: with locking.LockManager.get_lock(str(edge_id)): gw_info = self.plugin._extract_external_gw( context, router, is_extract=True) super(nsx_v.NsxVPluginV2, self.plugin).update_router( context, router_id, router) if gw_info != constants.ATTR_NOT_SPECIFIED: self.plugin._update_router_gw_info(context, router_id, gw_info) if 'admin_state_up' in r: # If router was deployed on a different edge then # admin-state-up is already updated on the new edge. current_edge_id = ( edge_utils.get_router_edge_id(context, router_id)) if current_edge_id == edge_id: self.plugin._update_router_admin_state(context, router_id, self.get_type(), r['admin_state_up']) return self.plugin.get_router(context, router_id) def detach_router(self, context, router_id, router): LOG.debug("Detach shared router id %s", router_id) # if it is the last shared router on this adge - add it to the pool edge_id = edge_utils.get_router_edge_id(context, router_id) if not edge_id: return router_db = self.plugin._get_router(context, router_id) self._notify_before_router_edge_association(context, router_db) with locking.LockManager.get_lock(str(edge_id)): self._remove_router_services_on_edge(context, router_id) with locking.LockManager.get_lock('nsx-shared-router-pool'): self._unbind_router_on_edge(context, router_id) def attach_router(self, context, router_id, router, appliance_size=None): # find the right place to add, and create a new one if necessary router_db = self.plugin._get_router(context, router_id) self._bind_router_on_available_edge( context, router_id, router_db.admin_state_up) edge_id = edge_utils.get_router_edge_id(context, router_id) LOG.debug("Shared router %s attached to edge %s", router_id, 
edge_id) with locking.LockManager.get_lock(str(edge_id)): self._add_router_services_on_available_edge(context, router_id) self._notify_after_router_edge_association(context, router_db) def delete_router(self, context, router_id): # make sure that the router binding is cleaned up try: nsxv_db.delete_nsxv_router_binding(context.session, router_id) except Exception as e: LOG.debug('Unable to delete router binding for %s. Error: ' '%s', router_id, e) def _get_router_routes(self, context, router_id): return self.plugin._get_extra_routes_by_router_id( context, router_id) def _get_router_next_hop(self, context, router_id): router_qry = context.session.query(l3_db_models.Router) router_db = router_qry.filter_by(id=router_id).one() return self.plugin._get_external_attachment_info( context, router_db)[2] def _update_routes_on_routers(self, context, target_router_id, router_ids, only_if_target_routes=False): if only_if_target_routes: # First check if the target router has any routes or next hop # If not - it means that nothing changes so we can skip this # backend call target_routes = self._get_router_routes(context, target_router_id) target_next_hop = self._get_router_next_hop( context, target_router_id) if not target_routes and not target_next_hop: LOG.debug("_update_routes_on_routers skipped since router %s " "has no routes", target_router_id) return nexthop = None all_routes = [] for router_id in router_ids: routes = self._get_router_routes(context, router_id) filters = {'device_id': [router_id]} ports = self.plugin.get_ports(context.elevated(), filters) self.plugin._add_network_info_for_routes(context, routes, ports) all_routes.extend(routes) if not nexthop: router_nexthop = self._get_router_next_hop(context, router_id) if router_nexthop: nexthop = router_nexthop # TODO(berlin) do rollback op. 
edge_utils.update_routes(self.nsx_v, context, target_router_id, all_routes, nexthop) # return a dic of each router -> list of vnics from the other routers def _get_all_routers_vnic_indices(self, context, router_ids): all_vnic_indices = {} if len(router_ids) < 1: # there are no routers return all_vnic_indices intf_ports = self.plugin.get_ports( context.elevated(), filters={'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}) edge_id = edge_utils.get_router_edge_id(context, router_ids[0]) edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, edge_id) for this_router_id in router_ids: # get networks IDs for this router router_net_ids = list( set([port['network_id'] for port in intf_ports if port['device_id'] == this_router_id])) # get vnic index for each network vnic_indices = [] for net_id in router_net_ids: vnic_indices.extend([edge_vnic_binding.vnic_index for edge_vnic_binding in edge_vnic_bindings if edge_vnic_binding.network_id == net_id ]) # make sure the list is unique: vnic_indices = list(set(vnic_indices)) # add to the result dict all_vnic_indices[this_router_id] = list(vnic_indices) return all_vnic_indices def update_nat_rules(self, context, router, router_id): edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(edge_id)): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) self._update_nat_rules_on_routers(context, router_id, router_ids) def _update_nat_rules_on_routers(self, context, target_router_id, router_ids): edge_id, az_name = self.plugin._get_edge_id_and_az_by_rtr_id( context, target_router_id) az = self._availability_zones.get_availability_zone(az_name) snats = [] dnats = [] vnics_by_router = self._get_all_routers_vnic_indices( context, router_ids) for router_id in router_ids: router_qry = context.session.query(l3_db_models.Router) router = router_qry.filter_by(id=router_id).one() if router.gw_port: snat, dnat = self.plugin._get_nat_rules(context, router) 
snats.extend(snat) dnats.extend(dnat) if (not az.bind_floatingip_to_all_interfaces and len(dnat) > 0): # Copy each DNAT rule to all vnics of the other routers, # to allow NAT-ed traffic between routers # no need for that if bind_floatingip_to_all_interfaces # is on (default) other_vnics = [] for other_router_id in router_ids: if other_router_id != router_id: other_vnics.extend( vnics_by_router[other_router_id]) for rule in dnat: for vnic_index in other_vnics: new_rule = rule.copy() # use explicit vnic_index new_rule['vnic_index'] = vnic_index dnats.extend([new_rule]) edge_utils.update_nat_rules( self.nsx_v, context, target_router_id, snats, dnats, az=az) def _update_external_interface_on_routers(self, context, target_router_id, router_ids): ext_net_ids = self._get_ext_net_ids(context, router_ids) if len(ext_net_ids) > 1: LOG.error("Can't configure external interface on multiple " "external networks %(networks)s for routers %(routers)s", {'networks': ext_net_ids, 'routers': router_ids}) msg = _("Can't configure external interface on multiple external " "networks") raise nsx_exc.NsxPluginException(err_msg=msg) gateway_primary_addr = None gateway_mask = None gateway_nexthop = None secondary = [] if not ext_net_ids: ext_net_id = None else: ext_net_id = ext_net_ids[0] for router_id in router_ids: router_qry = context.session.query(l3_db_models.Router) router = router_qry.filter_by(id=router_id).one() addr, mask, nexthop = self.plugin._get_external_attachment_info( context, router) if addr: if not gateway_primary_addr: gateway_primary_addr = addr else: secondary.append(addr) if mask and not gateway_mask: gateway_mask = mask if nexthop and not gateway_nexthop: gateway_nexthop = nexthop secondary.extend(self.plugin._get_floatingips_by_router( context, router_id)) LOG.debug('Configure ext interface as following, ext_net: %s, ' 'primaryAddress: %s, netmask: %s, nexthop: %s, secondary: ' '%s.', ext_net_id, gateway_primary_addr, gateway_mask, gateway_nexthop, secondary) 
self.edge_manager.update_external_interface( self.nsx_v, context, target_router_id, ext_net_id, gateway_primary_addr, gateway_mask, secondary) def _update_subnets_and_dnat_firewall_on_routers(self, context, target_router_id, router_ids, allow_external=True): fw_rules = [] for router_id in router_ids: # Add FW rules per single router router_qry = context.session.query(l3_db_models.Router) router = router_qry.filter_by(id=router_id).one() # subnet rules to allow east-west traffic subnet_rules = self.plugin._get_subnet_fw_rules(context, router) if subnet_rules: fw_rules.extend(subnet_rules) # DNAT rules dnat_rule = self.plugin._get_dnat_fw_rule(context, router) if dnat_rule: fw_rules.append(dnat_rule) # Add rule for not NAT-ed allocation pools alloc_pool_rule = self.plugin._get_allocation_pools_fw_rule( context, router) if alloc_pool_rule: fw_rules.append(alloc_pool_rule) # Add no-snat rules nosnat_fw_rules = self.plugin._get_nosnat_subnets_fw_rules( context, router) fw_rules.extend(nosnat_fw_rules) # If metadata service is enabled, block access to inter-edge network if self.plugin.metadata_proxy_handler: fw_rules += nsx_v_md_proxy.get_router_fw_rules() # TODO(asarfaty): Add fwaas rules when fwaas supports shared routers fw = {'firewall_rule_list': fw_rules} edge_utils.update_firewall(self.nsx_v, context, target_router_id, fw, allow_external=allow_external) def update_routes(self, context, router_id, nexthop): edge_id = edge_utils.get_router_edge_id(context, router_id) if edge_id: router_db = self.plugin._get_router(context, router_id) available_router_ids, conflict_router_ids = ( self._get_available_and_conflicting_ids(context, router_id)) is_conflict = self.edge_manager.is_router_conflict_on_edge( context, router_id, conflict_router_ids, [], 0) if is_conflict: self._notify_before_router_edge_association(context, router_db) with locking.LockManager.get_lock(str(edge_id)): self._remove_router_services_on_edge(context, router_id) with locking.LockManager.get_lock( 
'nsx-shared-router-pool'): self._unbind_router_on_edge(context, router_id) self._bind_router_on_available_edge( context, router_id, router_db.admin_state_up) new_edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(new_edge_id)): self._add_router_services_on_available_edge(context, router_id) self._notify_after_router_edge_association(context, router_db) else: with locking.LockManager.get_lock(str(edge_id)): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) if router_ids: self._update_routes_on_routers( context, router_id, router_ids) def _get_ext_net_ids(self, context, router_ids): ext_net_ids = [] for router_id in router_ids: router_qry = context.session.query(l3_db_models.Router) router_db = router_qry.filter_by(id=router_id).one() ext_net_id = router_db.gw_port_id and router_db.gw_port.network_id if ext_net_id and ext_net_id not in ext_net_ids: ext_net_ids.append(ext_net_id) return ext_net_ids def _get_shared_routers(self, context): shared_routers = [] routers_qry = context.session.query(l3_db_models.Router).all() for r in routers_qry: nsx_attr = (context.session.query( nsxv_models.NsxvRouterExtAttributes).filter_by( router_id=r['id']).first()) if nsx_attr and nsx_attr['router_type'] == 'shared': shared_routers.append(r) return shared_routers def _get_available_and_conflicting_ids(self, context, router_id): """Query all conflicting router ids with existing router id. The router with static routes will be conflict with all other routers. The routers with different gateway will be conflict. The routers with overlapping interface will be conflict. In not share_edges_between_tenants: The routers of different tenants will be in conflict with the router """ # 1. Check gateway # 2. Check subnet interface # 3. 
Check static routes router_list = [] src_router_dict = {} ports_qry = context.session.query(models_v2.Port) intf_ports = ports_qry.filter_by( device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF).all() gw_ports = ports_qry.filter_by( device_owner=l3_db.DEVICE_OWNER_ROUTER_GW).all() shared_routers = self._get_shared_routers(context) for r in shared_routers: router_dict = {} router_dict['id'] = r['id'] router_dict['gateway'] = None router_dict['tenant_id'] = r['tenant_id'] for gwp in gw_ports: if gwp['id'] == r['gw_port_id']: try: router_dict['gateway'] = ( gwp['fixed_ips'][0]['subnet_id']) except IndexError: LOG.error("Skipping GW port %s with no fixed IP", gwp['id']) subnet_ids = [p['fixed_ips'][0]['subnet_id'] for p in intf_ports if p['device_id'] == r['id']] router_dict['subnet_ids'] = subnet_ids extra_routes = self.plugin._get_extra_routes_by_router_id( context, r['id']) destinations = [routes['destination'] for routes in extra_routes] router_dict['destinations'] = destinations LOG.debug('The router configuration is %s for router %s', router_dict, router_dict['id']) if router_id != r['id']: router_list.append(router_dict) else: src_router_dict = router_dict # Router with static routes is conflict with other routers available_routers = [] conflict_routers = [] if src_router_dict['destinations'] != []: conflict_routers = [r['id'] for r in router_list] return (available_routers, conflict_routers) subnets_qry = context.session.query(models_v2.Subnet).all() conflict_cidr_set = [] for subnet in subnets_qry: if subnet['id'] in src_router_dict['subnet_ids']: conflict_cidr_set.append(subnet['cidr']) if (src_router_dict['gateway'] is not None and subnet['id'] == src_router_dict['gateway']): conflict_cidr_set.append(subnet['cidr']) conflict_ip_set = netaddr.IPSet(conflict_cidr_set) # Check conflict router ids with gateway and interface for r in router_list: if r['destinations'] != []: conflict_routers.append(r['id']) else: cidr_set = [] for subnet in subnets_qry: if subnet['id'] in 
r['subnet_ids']: cidr_set.append(subnet['cidr']) ip_set = netaddr.IPSet(cidr_set) if (src_router_dict['gateway'] is None or r['gateway'] is None or src_router_dict['gateway'] == r['gateway']): if (conflict_ip_set & ip_set): conflict_routers.append(r['id']) else: if (not cfg.CONF.nsxv.share_edges_between_tenants and src_router_dict['tenant_id'] != r['tenant_id']): # routers of other tenants are conflicting conflict_routers.append(r['id']) else: available_routers.append(r['id']) else: conflict_routers.append(r['id']) return (available_routers, conflict_routers) def _get_conflict_network_and_router_ids_by_intf(self, context, router_id): """Collect conflicting networks and routers based on interface ports. Collect conflicting networks which has overlapping subnet attached to another router. Collect conflict routers which has overlap network attached to it. Returns: conflict_network_ids: networks which has overlapping ips conflict_router_ids: routers which has overlapping interfaces intf_num: interfaces number attached on the router """ conflict_network_ids = [] conflict_router_ids = [] ports_qry = context.session.query(models_v2.Port) intf_ports = ports_qry.filter_by( device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF).all() router_net_ids = list( set([port['network_id'] for port in intf_ports if port['device_id'] == router_id])) if cfg.CONF.allow_overlapping_ips: router_intf_ports = [port for port in intf_ports if port['device_id'] == router_id] subnet_ids = [] for port in router_intf_ports: subnet_ids.append(port['fixed_ips'][0]['subnet_id']) subnets_qry = context.session.query(models_v2.Subnet).all() subnets = [subnet for subnet in subnets_qry if subnet['id'] in subnet_ids] conflict_network_ids.extend( self.plugin._get_conflict_network_ids_by_overlapping( context, subnets)) other_router_ports = [port for port in intf_ports if port['device_id'] != router_id] for port in other_router_ports: if port['network_id'] in router_net_ids: 
conflict_router_ids.append(port['device_id']) conflict_router_ids = list(set(conflict_router_ids)) conflict_network_ids = list(set(conflict_network_ids)) intf_num = len(router_net_ids) return (conflict_network_ids, conflict_router_ids, intf_num) def _get_conflict_network_ids_by_ext_net(self, context, router_id): """Collect conflicting networks based on external network. Collect conflicting networks which has overlapping subnet with the router's external network """ conflict_network_ids = [] ext_net_id = self._get_external_network_id_by_router(context, router_id) if ext_net_id: ext_net = self.plugin._get_network(context, ext_net_id) if ext_net.subnets: ext_subnet = ext_net.subnets[0] if ext_subnet: conflict_network_ids.extend( self.plugin._get_conflict_network_ids_by_overlapping( context, [ext_subnet])) return conflict_network_ids def _get_conflict_router_ids_by_ext_net(self, context, conflict_network_ids): """Collect conflict routers based on its external network. Collect conflict router if it has external network and the external network is in conflict_network_ids """ ext_net_filters = {'router:external': [True]} ext_nets = self.plugin.get_networks( context.elevated(), filters=ext_net_filters) ext_net_ids = [ext_net.get('id') for ext_net in ext_nets] conflict_ext_net_ids = list(set(ext_net_ids) & set(conflict_network_ids)) gw_ports_filter = {'network_id': conflict_ext_net_ids, 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_GW]} ports_qry = context.session.query(models_v2.Port) gw_ports = self.plugin._apply_filters_to_query( ports_qry, models_v2.Port, gw_ports_filter).all() return list(set([gw_port['device_id'] for gw_port in gw_ports])) def _get_optional_and_conflict_router_ids_by_gw(self, context, router_id): """Collect conflict routers and optional routers based on GW port. 
Collect conflict router if it has different external network, else, collect optional router if it is not distributed and exclusive Returns: optional_router_ids: routers we can use its edge for the shared router. conflict_router_ids: conflict routers which has different gateway """ ext_net_id = self._get_external_network_id_by_router(context, router_id) routers = context.session.query(l3_db_models.Router).all() optional_router_ids = [] conflict_router_ids = [] if ext_net_id: ports_qry = context.session.query(models_v2.Port) all_gw_ports = ports_qry.filter_by( device_owner=l3_db.DEVICE_OWNER_ROUTER_GW).all() metadata_nets = nsxv_db.get_nsxv_internal_networks( context.session, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE) metadata_net_ids = [metadata_net['network_id'] for metadata_net in metadata_nets] # filter out metadata gw_ports all_gw_ports = [gw_port for gw_port in all_gw_ports if gw_port['network_id'] not in metadata_net_ids] for gw_port in all_gw_ports: if gw_port and gw_port['network_id'] != ext_net_id: conflict_router_ids.append(gw_port['device_id']) for router in routers: router_res = {} self.plugin._extend_nsx_router_dict(router_res, router) if (router['id'] not in conflict_router_ids and router_res.get('router_type') == 'shared'): optional_router_ids.append(router['id']) return optional_router_ids, conflict_router_ids def _bind_router_on_available_edge(self, context, router_id, admin_state): with locking.LockManager.get_lock('nsx-shared-router-pool'): conflict_network_ids, conflict_router_ids, intf_num = ( self._get_conflict_network_and_router_ids_by_intf(context, router_id)) conflict_network_ids_by_ext_net = ( self._get_conflict_network_ids_by_ext_net(context, router_id)) conflict_network_ids.extend(conflict_network_ids_by_ext_net) optional_router_ids, new_conflict_router_ids = ( self._get_available_and_conflicting_ids(context, router_id)) conflict_router_ids.extend(new_conflict_router_ids) conflict_router_ids = list(set(conflict_router_ids)) az, 
flavor_id = self.get_router_az_and_flavor_by_id(context, router_id) new = self.edge_manager.bind_router_on_available_edge( context, router_id, optional_router_ids, conflict_router_ids, conflict_network_ids, intf_num, az) # configure metadata service on the router. if self.plugin.metadata_proxy_handler and new: md_proxy_handler = self.plugin.get_metadata_proxy_handler( az.name) if md_proxy_handler: md_proxy_handler.configure_router_edge(context, router_id) edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(edge_id)): # add all internal interfaces of the router on edge intf_net_ids = ( self.plugin._get_internal_network_ids_by_router(context, router_id)) for network_id in intf_net_ids: address_groups = self.plugin._get_address_groups( context, router_id, network_id) edge_utils.update_internal_interface( self.nsx_v, context, router_id, network_id, address_groups, admin_state) if flavor_id: # if several routers share same edge, they might have # different flavors with conflicting syslog settings. 
# in this case, each new router association will override # previous syslog settings on the edge self.edge_manager.update_syslog_by_flavor(context, router_id, flavor_id, edge_id) LOG.info("Binding shared router %(rtr)s: edge %(edge)s", {'rtr': router_id, 'edge': edge_id}) def _unbind_router_on_edge(self, context, router_id): az = self.get_router_az_by_id(context, router_id) edge_id = edge_utils.get_router_edge_id(context, router_id) self.edge_manager.reconfigure_shared_edge_metadata_port( context, router_id) self.edge_manager.unbind_router_on_edge(context, router_id) if self.plugin.metadata_proxy_handler: metadata_proxy_handler = self.plugin.get_metadata_proxy_handler( az.name) if metadata_proxy_handler: metadata_proxy_handler.cleanup_router_edge(context, router_id) LOG.info("Unbinding shared router %(rtr)s: edge %(edge)s", {'rtr': router_id, 'edge': edge_id}) def _add_router_services_on_available_edge(self, context, router_id): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) self._update_external_interface_on_routers( context, router_id, router_ids) self._update_routes_on_routers(context, router_id, router_ids, only_if_target_routes=True) self._update_nat_rules_on_routers(context, router_id, router_ids) self._update_subnets_and_dnat_firewall_on_routers( context, router_id, router_ids, allow_external=True) def _remove_router_services_on_edge(self, context, router_id, intf_net_id=None): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) router_ids.remove(router_id) # Refresh firewall, nats, ext_vnic as well as static routes self._update_routes_on_routers(context, router_id, router_ids, only_if_target_routes=True) self._update_subnets_and_dnat_firewall_on_routers( context, router_id, router_ids, allow_external=True) self._update_nat_rules_on_routers(context, router_id, router_ids) self._update_external_interface_on_routers( context, router_id, router_ids) intf_net_ids = ( 
self.plugin._get_internal_network_ids_by_router(context, router_id)) if intf_net_id: intf_net_ids.remove(intf_net_id) for net_id in intf_net_ids: edge_utils.delete_interface(self.nsx_v, context, router_id, net_id) @db_api.retry_db_errors def _update_router_gw_info(self, context, router_id, info, is_routes_update=False, force_update=False): router = self.plugin._get_router(context, router_id) edge_id = edge_utils.get_router_edge_id(context, router_id) if not edge_id: super(nsx_v.NsxVPluginV2, self.plugin)._update_router_gw_info( context, router_id, info, router=router) # UPDATE gw info only if the router has been attached to an edge else: is_migrated = False router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) org_ext_net_id = (router.gw_port_id and router.gw_port.network_id) org_enable_snat = router.enable_snat orgaddr, orgmask, orgnexthop = ( self.plugin._get_external_attachment_info( context, router)) super(nsx_v.NsxVPluginV2, self.plugin)._update_router_gw_info( context, router_id, info, router=router) new_ext_net_id = (router.gw_port_id and router.gw_port.network_id) new_enable_snat = router.enable_snat newaddr, newmask, newnexthop = ( self.plugin._get_external_attachment_info(context, router)) with locking.LockManager.get_lock(str(edge_id)): if new_ext_net_id and new_ext_net_id != org_ext_net_id: # Check whether the gw address has overlapping # with networks attached to the same edge conflict_network_ids = ( self._get_conflict_network_ids_by_ext_net( context, router_id)) is_migrated = self.edge_manager.is_router_conflict_on_edge( context, router_id, [], conflict_network_ids) if is_migrated: self._remove_router_services_on_edge(context, router_id) with locking.LockManager.get_lock( 'nsx-shared-router-pool'): self._unbind_router_on_edge(context, router_id) if not is_migrated: ext_net_ids = self._get_ext_net_ids(context, router_ids) if len(ext_net_ids) > 1: # move all routing service of the router from existing # edge to a new available 
edge if new_ext_net_id is # changed. self._remove_router_services_on_edge(context, router_id) with locking.LockManager.get_lock( 'nsx-shared-router-pool'): self._unbind_router_on_edge(context, router_id) is_migrated = True else: updated_routes = False # Update external vnic if addr or mask is changed if orgaddr != newaddr or orgmask != newmask: # If external gateway is removed, the default # gateway should be cleared before updating the # interface, or else the backend will fail. if (new_ext_net_id != org_ext_net_id and new_ext_net_id is None): self._update_routes_on_routers( context, router_id, router_ids) updated_routes = True self._update_external_interface_on_routers( context, router_id, router_ids) # Update SNAT rules if ext net changed # or ext net not changed but snat is changed. if ((new_ext_net_id != org_ext_net_id) or (new_ext_net_id == org_ext_net_id and new_enable_snat != org_enable_snat)): self._update_nat_rules_on_routers(context, router_id, router_ids) if (new_ext_net_id != org_ext_net_id or new_enable_snat != org_enable_snat): self._update_subnets_and_dnat_firewall_on_routers( context, router_id, router_ids, allow_external=True) # Update static routes in all (if not updated yet). 
if not updated_routes: self._update_routes_on_routers( context, router_id, router_ids) if is_migrated: self._notify_before_router_edge_association(context, router, edge_id) self._bind_router_on_available_edge( context, router_id, router.admin_state_up) edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(edge_id)): self._add_router_services_on_available_edge(context, router_id) self._notify_after_router_edge_association(context, router) def _base_add_router_interface(self, context, router_id, interface_info): with locking.LockManager.get_lock('nsx-shared-router-pool'): return super(nsx_v.NsxVPluginV2, self.plugin).add_router_interface( context, router_id, interface_info) def add_router_interface(self, context, router_id, interface_info): # Lock the shared router before any action that can cause the router # to be deployed on a new edge. with locking.LockManager.get_lock('router-%s' % router_id): return self._safe_add_router_interface(context, router_id, interface_info) def _safe_add_router_interface(self, context, router_id, interface_info): self.plugin._check_intf_number_of_router(context, router_id) edge_id = edge_utils.get_router_edge_id(context, router_id) router_db = self.plugin._get_router(context, router_id) if edge_id: is_migrated = False with locking.LockManager.get_lock('nsx-shared-router-pool'): info = super(nsx_v.NsxVPluginV2, self.plugin).add_router_interface( context, router_id, interface_info) with locking.LockManager.get_lock(str(edge_id)): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) subnet = self.plugin.get_subnet(context, info['subnet_id']) network_id = subnet['network_id'] # Collect all conflict networks whose cidr are overlapped # with networks attached to the router and conflict routers # which has same network with the router's. 
conflict_network_ids, conflict_router_ids, _ = ( self._get_conflict_network_and_router_ids_by_intf( context, router_id)) _, new_conflict_router_ids = ( self._get_available_and_conflicting_ids(context, router_id)) conflict_router_ids.extend(new_conflict_router_ids) conflict_router_ids = list(set(conflict_router_ids)) interface_ports = ( self.plugin._get_router_interface_ports_by_network( context, router_id, network_id)) # Consider whether another subnet of the same network # has been attached to the router. if len(interface_ports) > 1: is_conflict = ( self.edge_manager.is_router_conflict_on_edge( context, router_id, conflict_router_ids, conflict_network_ids, 0)) else: is_conflict = ( self.edge_manager.is_router_conflict_on_edge( context, router_id, conflict_router_ids, conflict_network_ids, 1)) if not is_conflict: address_groups = self.plugin._get_address_groups( context, router_id, network_id) edge_utils.update_internal_interface( self.nsx_v, context, router_id, network_id, address_groups, router_db.admin_state_up) if router_db.gw_port and router_db.enable_snat: self._update_nat_rules_on_routers( context, router_id, router_ids) self._update_subnets_and_dnat_firewall_on_routers( context, router_id, router_ids, allow_external=True) if is_conflict: self._notify_before_router_edge_association( context, router_db, edge_id) with locking.LockManager.get_lock(str(edge_id)): if len(interface_ports) > 1: self._remove_router_services_on_edge( context, router_id) else: self._remove_router_services_on_edge( context, router_id, network_id) self._unbind_router_on_edge(context, router_id) is_migrated = True if is_migrated: self._bind_router_on_available_edge( context, router_id, router_db.admin_state_up) edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(edge_id)): self._add_router_services_on_available_edge(context, router_id) self._notify_after_router_edge_association(context, router_db) else: info = 
self._base_add_router_interface(context, router_id, interface_info) # bind and configure routing service on an available edge self._bind_router_on_available_edge( context, router_id, router_db.admin_state_up) edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(edge_id)): self._add_router_services_on_available_edge(context, router_id) self._notify_after_router_edge_association(context, router_db) return info def remove_router_interface(self, context, router_id, interface_info): # Lock the shared router before any action that can cause the router # to be deployed on a new edge with locking.LockManager.get_lock('router-%s' % router_id): return self._safe_remove_router_interface(context, router_id, interface_info) def _safe_remove_router_interface(self, context, router_id, interface_info): edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock('nsx-shared-router-pool'): info = super( nsx_v.NsxVPluginV2, self.plugin).remove_router_interface( context, router_id, interface_info) subnet = self.plugin.get_subnet(context, info['subnet_id']) network_id = subnet['network_id'] ports = self.plugin._get_router_interface_ports_by_network( context, router_id, network_id) connected_networks = ( self.plugin._get_internal_network_ids_by_router(context, router_id)) if not ports and not connected_networks: router = self.plugin._get_router(context, router_id) self._notify_before_router_edge_association(context, router) with locking.LockManager.get_lock(str(edge_id)): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) self._update_nat_rules_on_routers(context, router_id, router_ids) self._update_subnets_and_dnat_firewall_on_routers( context, router_id, router_ids, allow_external=True) if not ports: edge_utils.delete_interface(self.nsx_v, context, router_id, network_id) # unbind all services if no interfaces attached to the # router if not connected_networks: 
self._remove_router_services_on_edge(context, router_id) self._unbind_router_on_edge(context, router_id) else: address_groups = self.plugin._get_address_groups( context, router_id, network_id) edge_utils.update_internal_interface(self.nsx_v, context, router_id, network_id, address_groups) return info def _update_edge_router(self, context, router_id): edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(edge_id)): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) if router_ids: self._update_external_interface_on_routers( context, router_id, router_ids) self._update_nat_rules_on_routers( context, router_id, router_ids) self._update_subnets_and_dnat_firewall_on_routers( context, router_id, router_ids, allow_external=True) vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/drivers/distributed_router_driver.py0000666000175100017510000004145413244523345030301 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from oslo_log import log as logging from oslo_utils import excutils from neutron.db import api as db_api from neutron.db import l3_db from neutron_lib import constants from neutron_lib import exceptions as n_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.drivers import ( abstract_router_driver as router_driver) from vmware_nsx.plugins.nsx_v import plugin as nsx_v from vmware_nsx.plugins.nsx_v.vshield import edge_utils LOG = logging.getLogger(__name__) class RouterDistributedDriver(router_driver.RouterBaseDriver): def get_type(self): return "distributed" def _get_edge_id(self, context, router_id): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) return binding.get('edge_id') def _update_routes_on_plr(self, context, router_id, plr_id, newnexthop): lswitch_id = edge_utils.get_internal_lswitch_id_of_plr_tlr( context, router_id) subnets = self.plugin._find_router_subnets_cidrs( context.elevated(), router_id) routes = [] for subnet in subnets: routes.append({ 'destination': subnet, 'nexthop': (edge_utils.get_vdr_transit_network_tlr_address()), 'network_id': lswitch_id }) # Add extra routes referring to external network on plr extra_routes = self.plugin._prepare_edge_extra_routes( context, router_id) routes.extend([route for route in extra_routes if route.get('external')]) edge_utils.update_routes(self.nsx_v, context, plr_id, routes, newnexthop) def _update_routes_on_tlr( self, context, router_id, newnexthop=edge_utils.get_vdr_transit_network_plr_address()): routes = [] # Add extra routes referring to internal network on tlr extra_routes = self.plugin._prepare_edge_extra_routes( context, router_id) routes.extend([route for route in extra_routes if not route.get('external')]) edge_utils.update_routes(self.nsx_v, context, router_id, routes, newnexthop) def create_router(self, context, lrouter, appliance_size=None, allow_metadata=True): az = self.get_router_az(lrouter) 
self.edge_manager.create_lrouter(context, lrouter, dist=True, availability_zone=az) def update_router(self, context, router_id, router): r = router['router'] is_routes_update = True if 'routes' in r else False gw_info = self.plugin._extract_external_gw(context, router, is_extract=True) super(nsx_v.NsxVPluginV2, self.plugin).update_router( context, router_id, router) if gw_info != constants.ATTR_NOT_SPECIFIED: self.plugin._update_router_gw_info(context, router_id, gw_info, is_routes_update) elif is_routes_update: # here is used to handle routes which tenant updates. router_db = self.plugin._get_router(context, router_id) nexthop = self.plugin._get_external_attachment_info( context, router_db)[2] with locking.LockManager.get_lock(self._get_edge_id(context, router_id)): self.plugin._update_subnets_and_dnat_firewall(context, router_db) self._update_routes(context, router_id, nexthop) if 'admin_state_up' in r: self.plugin._update_router_admin_state( context, router_id, self.get_type(), r['admin_state_up']) if 'name' in r: self.edge_manager.rename_lrouter(context, router_id, r['name']) # if we have a plr router - rename it too plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id) if plr_id: self.edge_manager.rename_lrouter(context, plr_id, r['name']) return self.plugin.get_router(context, router_id) def delete_router(self, context, router_id): self.edge_manager.delete_lrouter(context, router_id, dist=True) def update_routes(self, context, router_id, newnexthop): with locking.LockManager.get_lock(self._get_edge_id(context, router_id)): self._update_routes(context, router_id, newnexthop) def _update_routes(self, context, router_id, newnexthop): plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id) if plr_id: self._update_routes_on_plr(context, router_id, plr_id, newnexthop) self._update_routes_on_tlr(context, router_id) else: self._update_routes_on_tlr(context, router_id, newnexthop=None) def _update_nexthop(self, context, router_id, newnexthop): 
plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id) if plr_id: self._update_routes_on_plr(context, router_id, plr_id, newnexthop) @db_api.retry_db_errors def _update_router_gw_info(self, context, router_id, info, is_routes_update=False, force_update=False): router = self.plugin._get_router(context, router_id) org_ext_net_id = router.gw_port_id and router.gw_port.network_id org_enable_snat = router.enable_snat orgaddr, orgmask, orgnexthop = ( self.plugin._get_external_attachment_info( context, router)) # verify the edge was deployed before calling super code. tlr_edge_id = self._get_edge_id_or_raise(context, router_id) super(nsx_v.NsxVPluginV2, self.plugin)._update_router_gw_info( context, router_id, info, router=router) new_ext_net_id = router.gw_port_id and router.gw_port.network_id new_enable_snat = router.enable_snat newaddr, newmask, newnexthop = ( self.plugin._get_external_attachment_info( context, router)) plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id) if not new_ext_net_id: if plr_id: # delete all plr relative conf with locking.LockManager.get_lock(tlr_edge_id): self.edge_manager.delete_plr_by_tlr_id( context, plr_id, router_id) else: # Connecting plr to the tlr if new_ext_net_id is not None. 
if not plr_id: # Get the availability zone by ID because the router dict # retrieved by +get_router does not contain this information availability_zone = self.get_router_az_by_id( context, router['id']) with locking.LockManager.get_lock(tlr_edge_id): plr_id = self.edge_manager.create_plr_with_tlr_id( context, router_id, router.get('name'), availability_zone) if new_ext_net_id != org_ext_net_id and orgnexthop: # network changed, so need to remove default gateway # and all static routes before vnic can be configured with locking.LockManager.get_lock(tlr_edge_id): edge_utils.clear_gateway(self.nsx_v, context, plr_id) # Update external vnic if addr or mask is changed if orgaddr != newaddr or orgmask != newmask: with locking.LockManager.get_lock(tlr_edge_id): self.edge_manager.update_external_interface( self.nsx_v, context, plr_id, new_ext_net_id, newaddr, newmask) # Update SNAT rules if ext net changed # or ext net not changed but snat is changed. if (new_ext_net_id != org_ext_net_id or (new_ext_net_id == org_ext_net_id and new_enable_snat != org_enable_snat)): self.plugin._update_nat_rules(context, router, plr_id) if (new_ext_net_id != org_ext_net_id or new_enable_snat != org_enable_snat or is_routes_update): # Open firewall flows on plr self.plugin._update_subnets_and_dnat_firewall( context, router, router_id=plr_id) # update static routes in all with locking.LockManager.get_lock(tlr_edge_id): self._update_routes(context, router_id, newnexthop) if new_ext_net_id: self._notify_after_router_edge_association(context, router) def _validate_subnets_routers(self, context, router_id, interface_info): # Validate that multiple subnets are not connected to the router _nsxv_plugin = self.plugin net_id, subnet_id = _nsxv_plugin._get_interface_info(context, interface_info) port_filters = {'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF], 'network_id': [net_id]} intf_ports = _nsxv_plugin.get_ports(context.elevated(), filters=port_filters) router_ids = [port['device_id'] for port 
in intf_ports] all_routers = _nsxv_plugin.get_routers(context, filters={'id': router_ids}) dist_routers = [router['id'] for router in all_routers if router.get('distributed') is True] if len(dist_routers) > 0: err_msg = _("network can only be attached to just one distributed " "router, the network is already attached to router " "%(router_id)s") % {'router_id': dist_routers[0]} if router_id in dist_routers: # attach to the same router again raise n_exc.InvalidInput(error_message=err_msg) else: # attach to multiple routers raise n_exc.Conflict(error_message=err_msg) # Validate that the subnet is not a v6 one subnet = self.plugin.get_subnet(context.elevated(), subnet_id) if (subnet.get('ip_version') == 6 or (subnet['cidr'] not in (constants.ATTR_NOT_SPECIFIED, None) and netaddr.IPNetwork(subnet['cidr']).version == 6)): err_msg = _("No support for IPv6 interfaces") raise n_exc.InvalidInput(error_message=err_msg) def add_router_interface(self, context, router_id, interface_info): self._validate_subnets_routers(context, router_id, interface_info) info = super(nsx_v.NsxVPluginV2, self.plugin).add_router_interface( context, router_id, interface_info) router_db = self.plugin._get_router(context, router_id) subnet = self.plugin.get_subnet(context, info['subnet_id']) network_id = subnet['network_id'] address_groups = self.plugin._get_address_groups( context, router_id, network_id) edge_id = self._get_edge_id(context, router_id) interface_created = False try: with locking.LockManager.get_lock(str(edge_id)): edge_utils.add_vdr_internal_interface(self.nsx_v, context, router_id, network_id, address_groups, router_db.admin_state_up) interface_created = True # Update edge's firewall rules to accept subnets flows. 
self.plugin._update_subnets_and_dnat_firewall(context, router_db) if router_db.gw_port: plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id) if router_db.enable_snat: self.plugin._update_nat_rules(context, router_db, plr_id) # Open firewall flows on plr self.plugin._update_subnets_and_dnat_firewall( context, router_db, router_id=plr_id) # Update static routes of plr nexthop = self.plugin._get_external_attachment_info( context, router_db)[2] self._update_routes(context, router_id, nexthop) except Exception: with excutils.save_and_reraise_exception(): if not interface_created: super(nsx_v.NsxVPluginV2, self.plugin).remove_router_interface( context, router_id, interface_info) return info def remove_router_interface(self, context, router_id, interface_info): info = super(nsx_v.NsxVPluginV2, self.plugin).remove_router_interface( context, router_id, interface_info) router_db = self.plugin._get_router(context, router_id) subnet = self.plugin.get_subnet(context, info['subnet_id']) network_id = subnet['network_id'] with locking.LockManager.get_lock(self._get_edge_id(context, router_id)): if router_db.gw_port and router_db.enable_snat: plr_id = self.edge_manager.get_plr_by_tlr_id( context, router_id) self.plugin._update_nat_rules(context, router_db, plr_id) # Open firewall flows on plr self.plugin._update_subnets_and_dnat_firewall( context, router_db, router_id=plr_id) # Update static routes of plr nexthop = self.plugin._get_external_attachment_info( context, router_db)[2] self._update_routes(context, router_id, nexthop) self.plugin._update_subnets_and_dnat_firewall(context, router_db) # Safly remove interface, VDR can have interface to only one subnet # in a given network. 
edge_utils.delete_interface( self.nsx_v, context, router_id, network_id, dist=True) return info def _update_edge_router(self, context, router_id): router = self.plugin._get_router(context.elevated(), router_id) plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id) self.plugin._update_external_interface( context, router, router_id=plr_id) self.plugin._update_nat_rules(context, router, router_id=plr_id) self.plugin._update_subnets_and_dnat_firewall(context, router, router_id=plr_id) def update_router_interface_ip(self, context, router_id, port_id, int_net_id, old_ip, new_ip, subnet_mask): """Update the fixed ip of a distributed router interface. """ router = self.plugin._get_router(context, router_id) if port_id == router.gw_port_id: # external port / Uplink plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id) edge_id = self._get_edge_id_or_raise(context, plr_id) self.edge_manager.update_interface_addr( context, edge_id, old_ip, new_ip, subnet_mask, is_uplink=True) # Also update the nat rules self.plugin._update_nat_rules(context, router, plr_id) else: # Internal port: # get the edge-id of this router edge_id = self._get_edge_id_or_raise(context, router_id) # Get the vnic index edge_vnic_binding = nsxv_db.get_edge_vnic_binding( context.session, edge_id, int_net_id) vnic_index = edge_vnic_binding.vnic_index self.edge_manager.update_vdr_interface_addr( context, edge_id, vnic_index, old_ip, new_ip, subnet_mask) vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/0000775000175100017510000000000013244524600022373 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py0000666000175100017510000036050313244523345025107 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from distutils import version import os import random import time import eventlet import netaddr from neutron_lib.api.definitions import extra_dhcp_opt as ext_edo from neutron_lib.api import validators from neutron_lib import constants from neutron_lib import context as q_context from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import l3 as l3_exc from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import timeutils from oslo_utils import uuidutils import six from six import moves from sqlalchemy import exc as db_base_exc from sqlalchemy.orm import exc as sa_exc from vmware_nsx._i18n import _ from vmware_nsx.common import config as conf from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils as c_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsxv_db from vmware_nsx.dvs import dvs from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxapi_exc from vmware_nsx.plugins.nsx_v.vshield import vcns WORKER_POOL_SIZE = 8 RP_FILTER_PROPERTY_OFF_TEMPLATE = 'sysctl.net.ipv4.conf.%s.rp_filter=%s' MAX_EDGE_PENDING_SEC = 600 LOG = logging.getLogger(__name__) _uuid = uuidutils.generate_uuid SUPPORTED_EDGE_LOG_MODULES = ('routing', 'highavailability', 'dhcp', 
'loadbalancer', 'dns') SUPPORTED_EDGE_LOG_LEVELS = ('none', 'debug', 'info', 'warning', 'error') def _get_vdr_transit_network_ipobj(): transit_net = cfg.CONF.nsxv.vdr_transit_network return netaddr.IPNetwork(transit_net) def get_vdr_transit_network_netmask(): ip = _get_vdr_transit_network_ipobj() return str(ip.netmask) def get_vdr_transit_network_tlr_address(): ip = _get_vdr_transit_network_ipobj() return str(ip[1]) def get_vdr_transit_network_plr_address(): ip = _get_vdr_transit_network_ipobj() # We need to ensure backwards compatibility. The original edge address # was "169.254.2.3" if conf.DEFAULT_VDR_TRANSIT_NETWORK == cfg.CONF.nsxv.vdr_transit_network: return conf.DEFAULT_PLR_ADDRESS else: return str(ip[2]) def validate_vdr_transit_network(): try: ip = _get_vdr_transit_network_ipobj() except Exception: raise n_exc.Invalid(_("Invalid VDR transit network")) if len(ip) < 4: raise n_exc.Invalid(_("VDR transit address range too small")) if is_overlapping_reserved_subnets(cfg.CONF.nsxv.vdr_transit_network, nsxv_constants.RESERVED_IPS): raise n_exc.Invalid(_("VDR transit network overlaps reserved subnet")) def is_overlapping_reserved_subnets(cidr, reserved_subnets): """Return True if the subnet overlaps with reserved subnets. 
For the V plugin we have a limitation that we should not use some reserved ranges like: 169.254.128.0/17 and 169.254.1.0/24 """ range = netaddr.IPNetwork(cidr) # Check each reserved subnet for intersection for reserved_subnet in reserved_subnets: # translate the reserved subnet to a range object reserved_range = netaddr.IPNetwork(reserved_subnet) # check if new subnet overlaps this reserved subnet if (range.first <= reserved_range.last and reserved_range.first <= range.last): return True return False def parse_backup_edge_pool_opt_per_az(az): """Parse edge pool opts per AZ and returns result.""" edge_pool_opts = az.backup_edge_pool res = [] for edge_pool_def in edge_pool_opts: split = edge_pool_def.split(':') try: (edge_type, edge_size, minimum_pooled_edges, maximum_pooled_edges) = split[:4] except ValueError: raise n_exc.Invalid(_("Invalid edge pool format for availability" " zone %s") % az.name) if edge_type not in vcns_const.ALLOWED_EDGE_TYPES: msg = (_("edge type '%(edge_type)s' is not allowed, " "allowed types: %(allowed)s for availability zone " "%(name)s") % {'edge_type': edge_type, 'allowed': vcns_const.ALLOWED_EDGE_TYPES, 'name': az.name}) LOG.error(msg) raise n_exc.Invalid(msg) edge_size = edge_size or nsxv_constants.COMPACT if edge_size not in vcns_const.ALLOWED_EDGE_SIZES: msg = (_("edge size '%(edge_size)s' is not allowed, " "allowed types: %(allowed)s for availability zone " "%(name)s") % {'edge_type': edge_size, 'allowed': vcns_const.ALLOWED_EDGE_SIZES, 'name': az.name}) LOG.error(msg) raise n_exc.Invalid(msg) res.append({'edge_type': edge_type, 'edge_size': edge_size, 'minimum_pooled_edges': int(minimum_pooled_edges), 'maximum_pooled_edges': int(maximum_pooled_edges)}) edge_pool_dicts = {} for edge_type in vcns_const.ALLOWED_EDGE_TYPES: edge_pool_dicts[edge_type] = {} for r in res: edge_pool_dict = edge_pool_dicts[r['edge_type']] if r['edge_size'] in edge_pool_dict.keys(): raise n_exc.Invalid(_("Duplicate edge pool configuration for " "availability 
zone %s") % az.name) else: edge_pool_dict[r['edge_size']] = { 'minimum_pooled_edges': r['minimum_pooled_edges'], 'maximum_pooled_edges': r['maximum_pooled_edges']} return edge_pool_dicts class EdgeManager(object): """Edge Appliance Management. EdgeManager provides a pool of edge appliances which we can use to support DHCP&metadata, L3&FIP and LB&FW&VPN services. """ def __init__(self, nsxv_manager, plugin): LOG.debug("Start Edge Manager initialization") self._worker_pool_pid = None self._worker_pool = None self.nsxv_manager = nsxv_manager self._availability_zones = nsx_az.NsxVAvailabilityZones() self.edge_pool_dicts = self._parse_backup_edge_pool_opt() self.nsxv_plugin = nsxv_manager.callbacks.plugin self.plugin = plugin self.per_interface_rp_filter = self._get_per_edge_rp_filter_state() self._check_backup_edge_pools() def _parse_backup_edge_pool_opt(self): """Parse edge pool opts for all availability zones.""" az_list = self._availability_zones.list_availability_zones_objects() az_pools = {} for az in az_list: az_pools[az.name] = parse_backup_edge_pool_opt_per_az(az) return az_pools def _get_az_pool(self, az_name): return self.edge_pool_dicts[az_name] def _get_worker_pool(self): if self._worker_pool_pid != os.getpid(): self._worker_pool_pid = os.getpid() self._worker_pool = eventlet.GreenPool(WORKER_POOL_SIZE) return self._worker_pool def _get_per_edge_rp_filter_state(self): ver = self.nsxv_manager.vcns.get_version() if version.LooseVersion(ver) < version.LooseVersion('6.2.0'): return False return True def _mark_router_bindings_status_error(self, context, edge_id, error_reason="backend error"): for binding in nsxv_db.get_nsxv_router_bindings_by_edge( context.session, edge_id): if binding['status'] == constants.ERROR: continue LOG.error('Mark router binding ERROR for resource ' '%(res_id)s on edge %(edge_id)s due to ' '%(reason)s', {'res_id': binding['router_id'], 'edge_id': edge_id, 'reason': error_reason}) nsxv_db.update_nsxv_router_binding( context.session, 
binding['router_id'], status=constants.ERROR) def _deploy_edge(self, context, lrouter, lswitch=None, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None, deploy_metadata=False): """Create an edge for logical router support.""" if context is None: context = q_context.get_admin_context() # deploy edge return self.nsxv_manager.deploy_edge(context, lrouter['id'], lrouter['name'], internal_network=None, appliance_size=appliance_size, dist=(edge_type == nsxv_constants.VDR_EDGE), availability_zone=availability_zone, deploy_metadata=deploy_metadata) def _deploy_backup_edges_on_db(self, context, num, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): router_ids = [(vcns_const.BACKUP_ROUTER_PREFIX + _uuid())[:vcns_const.EDGE_NAME_LEN] for i in moves.range(num)] for router_id in router_ids: nsxv_db.add_nsxv_router_binding( context.session, router_id, None, None, constants.PENDING_CREATE, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone.name) return router_ids def _deploy_backup_edges_at_backend( self, context, router_ids, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): eventlet.spawn_n(self._pool_creator, router_ids, appliance_size, edge_type, availability_zone) def _pool_creator(self, router_ids, appliance_size, edge_type, availability_zone): for router_id in router_ids: fake_router = { 'id': router_id, 'name': router_id} self._get_worker_pool().spawn_n( self._deploy_edge, None, fake_router, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone) def _delete_edge(self, context, router_binding): if router_binding['status'] == constants.ERROR: LOG.warning("Start deleting %(router_id)s corresponding " "edge: %(edge_id)s due to status error", {'router_id': router_binding['router_id'], 'edge_id': router_binding['edge_id']}) 
nsxv_db.update_nsxv_router_binding( context.session, router_binding['router_id'], status=constants.PENDING_DELETE) self._get_worker_pool().spawn_n( self.nsxv_manager.delete_edge, None, router_binding['router_id'], router_binding['edge_id'], dist=(router_binding['edge_type'] == nsxv_constants.VDR_EDGE)) def _delete_backup_edges_on_db(self, context, backup_router_bindings): for binding in backup_router_bindings: try: nsxv_db.update_nsxv_router_binding( context.session, binding['router_id'], status=constants.PENDING_DELETE) except sa_exc.NoResultFound: LOG.debug("Router binding %s does not exist.", binding['router_id']) def _delete_backup_edges_at_backend(self, context, backup_router_bindings): for binding in backup_router_bindings: # delete edge LOG.debug("Start deleting extra edge: %s in pool", binding['edge_id']) self._get_worker_pool().spawn_n( self.nsxv_manager.delete_edge, None, binding['router_id'], binding['edge_id'], dist=(binding['edge_type'] == nsxv_constants.VDR_EDGE)) def _clean_all_error_edge_bindings(self, context, availability_zone): # Find all backup edges in error state & # backup edges which are in pending-XXX state for too long filters = {'status': [constants.PENDING_CREATE, constants.PENDING_UPDATE, constants.PENDING_DELETE], 'availability_zone': [availability_zone.name]} if cfg.CONF.nsxv.housekeeping_readonly: filters['status'].append(constants.ERROR) like_filters = {'router_id': vcns_const.BACKUP_ROUTER_PREFIX + "%"} router_bindings = nsxv_db.get_nsxv_router_bindings( context.session, filters=filters, like_filters=like_filters) # filter only the entries in error state or too long in pending state error_router_bindings = [] for binding in router_bindings: to_delete = False if binding.status == constants.ERROR: to_delete = True elif binding.status == constants.PENDING_CREATE: # Bindings migrated from older versions have no created_at # attribute which should also be deleted. 
if (not binding.created_at or timeutils.is_older_than( binding.created_at, MAX_EDGE_PENDING_SEC)): to_delete = True elif (binding.status == constants.PENDING_UPDATE or binding.status == constants.PENDING_DELETE): # Bindings migrated from older versions have no updated_at # attribute. We will not delete those for now, as it is risky # and fails lots of tests. if (binding.updated_at and timeutils.is_older_than( binding.updated_at, MAX_EDGE_PENDING_SEC)): to_delete = True if to_delete: LOG.warning("Going to delete Erroneous edge: %s", binding) error_router_bindings.append(binding) self._delete_backup_edges_on_db(context, error_router_bindings) self._delete_backup_edges_at_backend(context, error_router_bindings) def _get_backup_edge_bindings(self, context, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, db_update_lock=False, availability_zone=None): filters = {'appliance_size': [appliance_size], 'edge_type': [edge_type], 'availability_zone': [availability_zone.name], 'status': [constants.PENDING_CREATE, constants.PENDING_UPDATE, constants.ACTIVE]} like_filters = {'router_id': vcns_const.BACKUP_ROUTER_PREFIX + "%"} return nsxv_db.get_nsxv_router_bindings( context.session, filters=filters, like_filters=like_filters) def _check_backup_edge_pools(self): admin_ctx = q_context.get_admin_context() for az in self._availability_zones.list_availability_zones_objects(): self._clean_all_error_edge_bindings(admin_ctx, az) for edge_type, v in self._get_az_pool(az.name).items(): for edge_size in vcns_const.ALLOWED_EDGE_SIZES: if edge_size in v.keys(): edge_pool_range = v[edge_size] self._check_backup_edge_pool( edge_pool_range['minimum_pooled_edges'], edge_pool_range['maximum_pooled_edges'], appliance_size=edge_size, edge_type=edge_type, availability_zone=az) else: self._check_backup_edge_pool( 0, 0, appliance_size=edge_size, edge_type=edge_type, availability_zone=az) def _check_backup_edge_pool(self, minimum_pooled_edges, maximum_pooled_edges, 
appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): """Check edge pool's status and return one available edge for use.""" admin_ctx = q_context.get_admin_context() backup_router_bindings = self._get_backup_edge_bindings( admin_ctx, appliance_size=appliance_size, edge_type=edge_type, db_update_lock=True, availability_zone=availability_zone) backup_num = len(backup_router_bindings) if backup_num > maximum_pooled_edges: self._delete_backup_edges_on_db( admin_ctx, backup_router_bindings[:backup_num - maximum_pooled_edges]) elif backup_num < minimum_pooled_edges: new_backup_num = backup_num router_ids = [] while (new_backup_num < minimum_pooled_edges): router_ids.extend( self._deploy_backup_edges_on_db( admin_ctx, 1, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone)) new_backup_num = len( self._get_backup_edge_bindings( admin_ctx, appliance_size=appliance_size, edge_type=edge_type, db_update_lock=True, availability_zone=availability_zone)) if backup_num > maximum_pooled_edges: self._delete_backup_edges_at_backend( admin_ctx, backup_router_bindings[:backup_num - maximum_pooled_edges]) elif backup_num < minimum_pooled_edges: self._deploy_backup_edges_at_backend( admin_ctx, router_ids, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone) def check_edge_active_at_backend(self, edge_id): try: status = self.nsxv_manager.get_edge_status(edge_id) return (status == vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE) except Exception: return False def _get_available_router_binding(self, context, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): backup_router_bindings = self._get_backup_edge_bindings( context, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone) while backup_router_bindings: router_binding = random.choice(backup_router_bindings) if (router_binding['status'] 
== constants.ACTIVE): if not self.check_edge_active_at_backend( router_binding['edge_id']): LOG.debug("Delete unavailable backup resource " "%(router_id)s with edge_id %(edge_id)s", {'router_id': router_binding['router_id'], 'edge_id': router_binding['edge_id']}) self._delete_edge(context, router_binding) else: LOG.debug("Get an available backup resource " "%(router_id)s with edge_id %(edge_id)s", {'router_id': router_binding['router_id'], 'edge_id': router_binding['edge_id']}) return router_binding backup_router_bindings.remove(router_binding) def _get_physical_provider_network(self, context, network_id, az_dvs): bindings = nsxv_db.get_network_bindings(context.session, network_id) # Set the return value as the availability zone DVS-ID of the # mgmt/edge cluster phys_net = az_dvs network_type = None if bindings: binding = bindings[0] network_type = binding['binding_type'] if (network_type == c_utils.NsxVNetworkTypes.VLAN and binding['phy_uuid'] != ''): if ',' not in binding['phy_uuid']: phys_net = binding['phy_uuid'] # Return user input physical network value for all network types # except VXLAN networks. The DVS-ID of the mgmt/edge cluster must # be returned for VXLAN network types. # We also validate that this binding starts with 'dvs'. If a admin # creates a provider portgroup then we need to use the default # configured DVS. 
elif (not network_type == c_utils.NsxVNetworkTypes.VXLAN and binding['phy_uuid'] != '' and binding['phy_uuid'].startswith('dvs')): phys_net = binding['phy_uuid'] return phys_net, network_type def _create_sub_interface(self, context, network_id, network_name, tunnel_index, address_groups, port_group_id=None): az = self.plugin.get_network_az_by_net_id(context, network_id) vcns_network_id = _retrieve_nsx_switch_id(context, network_id, az.name) if port_group_id is None: portgroup = {'vlanId': 0, 'networkName': network_name, 'networkBindingType': 'Static', 'networkType': 'Isolation'} config_spec = {'networkSpec': portgroup} dvs_id, network_type = self._get_physical_provider_network( context, network_id, az.dvs_id) pg, port_group_id = self.nsxv_manager.vcns.create_port_group( dvs_id, config_spec) # Ensure that the portgroup has the correct teaming self.plugin._update_network_teaming(dvs_id, None, port_group_id) interface = { 'name': _uuid(), 'tunnelId': tunnel_index, 'logicalSwitchId': vcns_network_id, 'isConnected': True } interface['addressGroups'] = {'addressGroups': address_groups} return port_group_id, interface def _getvnic_config(self, edge_id, vnic_index): _, vnic_config = self.nsxv_manager.get_interface(edge_id, vnic_index) return vnic_config def _delete_dhcp_internal_interface(self, context, edge_id, vnic_index, tunnel_index, network_id): """Delete the dhcp internal interface.""" LOG.debug("Query the vnic %s for DHCP Edge %s", vnic_index, edge_id) try: vnic_config = self._getvnic_config(edge_id, vnic_index) sub_interfaces = (vnic_config['subInterfaces']['subInterfaces'] if 'subInterfaces' in vnic_config else []) port_group_id = (vnic_config['portgroupId'] if 'portgroupId' in vnic_config else None) for sub_interface in sub_interfaces: if tunnel_index == sub_interface['tunnelId']: LOG.debug("Delete the tunnel %d on vnic %d", tunnel_index, vnic_index) (vnic_config['subInterfaces']['subInterfaces']. 
remove(sub_interface)) break # Clean the vnic if there is no sub-interface attached if len(sub_interfaces) == 0: header, _ = self.nsxv_manager.vcns.delete_interface(edge_id, vnic_index) if port_group_id: az = self.plugin.get_network_az_by_net_id( context, network_id) dvs_id, net_type = self._get_physical_provider_network( context, network_id, az.dvs_id) self.nsxv_manager.delete_port_group(dvs_id, port_group_id) else: self.nsxv_manager.vcns.update_interface(edge_id, vnic_config) except nsxapi_exc.VcnsApiException: LOG.exception('Failed to delete vnic %(vnic_index)d ' 'tunnel %(tunnel_index)d on edge %(edge_id)s ' 'for network %(net_id)s', {'vnic_index': vnic_index, 'tunnel_index': tunnel_index, 'net_id': network_id, 'edge_id': edge_id}) self._mark_router_bindings_status_error( context, edge_id, error_reason="delete dhcp internal interface failure") self._delete_dhcp_router_binding(context, network_id, edge_id) def _delete_dhcp_router_binding(self, context, network_id, edge_id): """Delete the router binding or clean the edge appliance.""" resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] bindings = nsxv_db.get_nsxv_router_bindings_by_edge( context.session, edge_id) all_edge_dhcp_entries = [binding['router_id'] for binding in bindings if binding['router_id']. startswith(vcns_const.DHCP_EDGE_PREFIX)] for router_id in all_edge_dhcp_entries: if (router_id != resource_id): # There are additional networks on this DHCP edge. 
# just delete the binding one and not the edge itself nsxv_db.delete_nsxv_router_binding(context.session, resource_id) return az_name = bindings[0]['availability_zone'] if bindings else '' self._free_dhcp_edge_appliance(context, network_id, az_name) def _addr_groups_convert_to_ipset(self, address_groups): cidr_list = [] for addr_group in address_groups: cidr = "/".join([addr_group['primaryAddress'], addr_group['subnetPrefixLength']]) cidr_list.append(cidr) return netaddr.IPSet(cidr_list) def _update_dhcp_internal_interface(self, context, edge_id, vnic_index, tunnel_index, network_id, address_groups): """Update the dhcp internal interface: 1. Add a new vnic tunnel with the address groups 2. Update the address groups to an existing tunnel """ LOG.debug("Query the vnic %s for DHCP Edge %s", vnic_index, edge_id) h, vnic_config = self.nsxv_manager.get_interface(edge_id, vnic_index) sub_iface_dict = vnic_config.get('subInterfaces') port_group_id = vnic_config.get('portgroupId') new_tunnel_creation = True iface_list = [] # Update the sub interface address groups for specific tunnel if sub_iface_dict: sub_interfaces = sub_iface_dict.get('subInterfaces') addr_groups_ipset = self._addr_groups_convert_to_ipset( address_groups) for sb in sub_interfaces: if tunnel_index == sb['tunnelId']: new_tunnel_creation = False sb['addressGroups']['addressGroups'] = address_groups else: sb_ipset = self._addr_groups_convert_to_ipset( sb['addressGroups']['addressGroups']) if addr_groups_ipset & sb_ipset: ls_id = sb['logicalSwitchId'] net_ids = nsx_db.get_net_ids(context.session, ls_id) if net_ids: # Here should never happen, else one bug occurs LOG.error("net %(id)s on edge %(edge_id)s " "overlaps with new net %(net_id)s", {'id': net_ids[0], 'edge_id': edge_id, 'net_id': network_id}) raise nsx_exc.NsxPluginException( err_msg=(_("update dhcp interface for net %s " "failed") % network_id)) else: # Occurs when there are DB inconsistency sb["is_overlapped"] = True LOG.error("unexpected sub intf 
%(id)s on edge " "%(edge_id)s overlaps with new net " "%(net_id)s. we would update with " "deleting it for DB consistency", {'id': ls_id, 'edge_id': edge_id, 'net_id': network_id}) iface_list = [sub for sub in sub_interfaces if not sub.get('is_overlapped', False)] # The first DHCP service creation, not update if new_tunnel_creation: network_name_item = [edge_id, str(vnic_index), str(tunnel_index)] network_name = ('-'.join(network_name_item) + _uuid())[:36] port_group_id, iface = self._create_sub_interface( context, network_id, network_name, tunnel_index, address_groups, port_group_id) iface_list.append(iface) LOG.debug("Update the vnic %d for DHCP Edge %s", vnic_index, edge_id) self.nsxv_manager.update_interface('fake_router_id', edge_id, vnic_index, port_group_id, tunnel_index, address_groups=iface_list) @vcns.retry_upon_exception(db_base_exc.OperationalError, max_delay=10) def _allocate_edge_appliance(self, context, resource_id, name, appliance_size=nsxv_constants.COMPACT, dist=False, availability_zone=None, deploy_metadata=False): """Try to allocate one available edge from pool.""" edge_type = (nsxv_constants.VDR_EDGE if dist else nsxv_constants.SERVICE_EDGE) lrouter = {'id': resource_id, 'name': name} az_pool = self._get_az_pool(availability_zone.name) edge_pool_range = az_pool[edge_type].get(appliance_size) if edge_pool_range is None: nsxv_db.add_nsxv_router_binding( context.session, resource_id, None, None, constants.PENDING_CREATE, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone.name) return self._deploy_edge(context, lrouter, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone, deploy_metadata=deploy_metadata) with locking.LockManager.get_lock('nsx-edge-backup-pool'): self._clean_all_error_edge_bindings( context, availability_zone=availability_zone) available_router_binding = self._get_available_router_binding( context, appliance_size=appliance_size, edge_type=edge_type, 
availability_zone=availability_zone) if available_router_binding: # Update the status from ACTIVE to PENDING_UPDATE # in case of other threads select the same router binding nsxv_db.update_nsxv_router_binding( context.session, available_router_binding['router_id'], status=constants.PENDING_UPDATE) # Synchronously deploy an edge if no available edge in pool. if not available_router_binding: # store router-edge mapping binding nsxv_db.add_nsxv_router_binding( context.session, resource_id, None, None, constants.PENDING_CREATE, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone.name) edge_id = self._deploy_edge(context, lrouter, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone, deploy_metadata=deploy_metadata) else: LOG.debug("Select edge: %(edge_id)s from pool for %(name)s", {'edge_id': available_router_binding['edge_id'], 'name': name}) # select the first available edge in pool. nsxv_db.delete_nsxv_router_binding( context.session, available_router_binding['router_id']) nsxv_db.add_nsxv_router_binding( context.session, lrouter['id'], available_router_binding['edge_id'], None, constants.PENDING_CREATE, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone.name) edge_id = available_router_binding['edge_id'] LOG.debug("Select edge: %(edge_id)s from pool for %(name)s", {'edge_id': edge_id, 'name': name}) with locking.LockManager.get_lock(str(edge_id)): self.nsxv_manager.callbacks.complete_edge_creation( context, edge_id, lrouter['name'], lrouter['id'], dist, True, availability_zone=availability_zone, deploy_metadata=deploy_metadata) try: self.nsxv_manager.rename_edge(edge_id, name) except nsxapi_exc.VcnsApiException as e: LOG.error("Failed to update edge: %s", e.response) self.nsxv_manager.callbacks.complete_edge_update( context, edge_id, resource_id, False, set_errors=True) backup_num = len(self._get_backup_edge_bindings( context, appliance_size=appliance_size, 
edge_type=edge_type, db_update_lock=True, availability_zone=availability_zone)) router_ids = self._deploy_backup_edges_on_db( context, edge_pool_range['minimum_pooled_edges'] - backup_num, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone) self._deploy_backup_edges_at_backend( context, router_ids, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone) return edge_id def _free_edge_appliance(self, context, router_id): """Try to collect one edge to pool.""" with locking.LockManager.get_lock('nsx-edge-backup-pool'): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if not binding: LOG.warning("router binding for router: %s " "not found", router_id) return dist = (binding['edge_type'] == nsxv_constants.VDR_EDGE) edge_id = binding['edge_id'] availability_zone_name = nsxv_db.get_edge_availability_zone( context.session, edge_id) az_pool = self._get_az_pool(availability_zone_name) edge_pool_range = az_pool[binding['edge_type']].get( binding['appliance_size']) nsxv_db.delete_nsxv_router_binding( context.session, router_id) backup_router_id = (vcns_const.BACKUP_ROUTER_PREFIX + _uuid())[:vcns_const.EDGE_NAME_LEN] nsxv_db.add_nsxv_router_binding( context.session, backup_router_id, edge_id, None, constants.PENDING_UPDATE, appliance_size=binding['appliance_size'], edge_type=binding['edge_type'], availability_zone=availability_zone_name) router_id = backup_router_id if (binding['status'] == constants.ERROR or not self.check_edge_active_at_backend(edge_id) or not edge_pool_range): nsxv_db.update_nsxv_router_binding( context.session, router_id, status=constants.PENDING_DELETE) # delete edge self._get_worker_pool().spawn_n( self.nsxv_manager.delete_edge, None, router_id, edge_id, dist=dist) return availability_zone = self._availability_zones.get_availability_zone( availability_zone_name) self._clean_all_error_edge_bindings( context, availability_zone=availability_zone) backup_router_bindings 
= self._get_backup_edge_bindings( context, appliance_size=binding['appliance_size'], edge_type=binding['edge_type'], availability_zone=availability_zone) backup_num = len(backup_router_bindings) # collect the edge to pool if pool not full if backup_num < edge_pool_range['maximum_pooled_edges']: # change edge's name at backend update_result = self.nsxv_manager.update_edge( context, backup_router_id, edge_id, backup_router_id, None, appliance_size=binding['appliance_size'], dist=dist, availability_zone=availability_zone) # Clean all edge vnic bindings nsxv_db.clean_edge_vnic_binding(context.session, edge_id) # Refresh edge_vnic_bindings for centralized router if not dist and edge_id: nsxv_db.init_edge_vnic_binding(context.session, edge_id) if update_result: nsxv_db.update_nsxv_router_binding( context.session, backup_router_id, status=constants.ACTIVE) LOG.debug("Collect edge: %s to pool", edge_id) else: nsxv_db.update_nsxv_router_binding( context.session, router_id, status=constants.PENDING_DELETE) # delete edge self._get_worker_pool().spawn_n( self.nsxv_manager.delete_edge, None, router_id, edge_id, dist=dist) def _allocate_dhcp_edge_appliance(self, context, resource_id, availability_zone): resource_name = (vcns_const.DHCP_EDGE_PREFIX + _uuid())[:vcns_const.EDGE_NAME_LEN] self._allocate_edge_appliance( context, resource_id, resource_name, appliance_size=vcns_const.SERVICE_SIZE_MAPPING['dhcp'], availability_zone=availability_zone, deploy_metadata=True) def allocate_lb_edge_appliance( self, context, resource_id, availability_zone, appliance_size=vcns_const.SERVICE_SIZE_MAPPING['lb']): return self._allocate_edge_appliance( context, resource_id, resource_id, appliance_size=appliance_size, availability_zone=availability_zone) def _free_dhcp_edge_appliance(self, context, network_id, az_name): router_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] # if there are still metadata ports on this edge - delete them now if self.plugin.metadata_proxy_handler: 
metadata_proxy_handler = self.plugin.get_metadata_proxy_handler( az_name) if metadata_proxy_handler: metadata_proxy_handler.cleanup_router_edge(context, router_id, warn=True) self._free_edge_appliance(context, router_id) def _build_lrouter_name(self, router_id, router_name): return ( router_name[:nsxv_constants.ROUTER_NAME_LENGTH - len(router_id)] + '-' + router_id) def update_syslog_by_flavor(self, context, router_id, flavor_id, edge_id): """Update syslog config on edge according to router flavor.""" syslog_config = self._get_syslog_config_from_flavor(context, router_id, flavor_id) if syslog_config: self.nsxv_manager.update_edge_syslog(edge_id, syslog_config, router_id) def create_lrouter( self, context, lrouter, lswitch=None, dist=False, appliance_size=vcns_const.SERVICE_SIZE_MAPPING['router'], availability_zone=None): """Create an edge for logical router support.""" router_name = self._build_lrouter_name(lrouter['id'], lrouter['name']) edge_id = self._allocate_edge_appliance( context, lrouter['id'], router_name, appliance_size=appliance_size, dist=dist, availability_zone=availability_zone) if lrouter.get('flavor_id'): self.update_syslog_by_flavor(context, lrouter['id'], lrouter['flavor_id'], edge_id) return edge_id def delete_lrouter(self, context, router_id, dist=False): self._free_edge_appliance(context, router_id) def rename_lrouter(self, context, router_id, new_name): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if not binding or not binding['edge_id']: LOG.warning("router binding for router: %s " "not found", router_id) return edge_id = binding['edge_id'] with locking.LockManager.get_lock(str(edge_id)): router_name = self._build_lrouter_name(router_id, new_name) self.nsxv_manager.rename_edge(edge_id, router_name) def resize_lrouter(self, context, router_id, new_size): # get the router edge-id binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if not binding or not binding['edge_id']: LOG.warning("router binding 
for router: %s " "not found", router_id) return edge_id = binding['edge_id'] with locking.LockManager.get_lock(str(edge_id)): # update the router on backend self.nsxv_manager.resize_edge(edge_id, new_size) # update the DB nsxv_db.update_nsxv_router_binding( context.session, router_id, appliance_size=new_size) def update_dhcp_edge_bindings(self, context, network_id): """Reconfigure the DHCP to the edge.""" resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] edge_binding = nsxv_db.get_nsxv_router_binding(context.session, resource_id) if not edge_binding: return with locking.LockManager.get_lock(str(edge_binding['edge_id'])): self.update_dhcp_service_config(context, edge_binding['edge_id']) def _add_dhcp_option(self, static_config, opt): if 'dhcpOptions' not in static_config: static_config['dhcpOptions'] = {} opt_name = opt['opt_name'] opt_val = opt['opt_value'] if opt_name in vcns_const.SUPPORTED_DHCP_OPTIONS: key = vcns_const.SUPPORTED_DHCP_OPTIONS[opt_name] if opt_name == 'classless-static-route': if 'option121' not in static_config['dhcpOptions']: static_config['dhcpOptions']['option121'] = { 'staticRoutes': []} opt121 = static_config['dhcpOptions']['option121'] net, ip = opt_val.split(',') opt121['staticRoutes'].append({'destinationSubnet': net, 'router': ip}) elif (opt_name == 'tftp-server-address' or opt_name == 'tftp-server'): if 'option150' not in static_config['dhcpOptions']: static_config['dhcpOptions']['option150'] = { 'tftpServers': []} opt150 = static_config['dhcpOptions']['option150'] opt150['tftpServers'].append(opt_val) else: static_config['dhcpOptions'][key] = opt_val else: if 'other' not in static_config['dhcpOptions']: static_config['dhcpOptions']['others'] = [] static_config['dhcpOptions']['others'].append( {'code': opt_name, 'value': opt_val}) def create_static_binding(self, context, port): """Create the DHCP Edge static binding configuration """ static_bindings = [] static_config = {} static_config['macAddress'] = port['mac_address'] 
static_config['hostname'] = port['id'] static_config['leaseTime'] = cfg.CONF.nsxv.dhcp_lease_time for fixed_ip in port['fixed_ips']: # Query the subnet to get gateway and DNS try: subnet_id = fixed_ip['subnet_id'] subnet = self.nsxv_plugin._get_subnet(context, subnet_id) except n_exc.SubnetNotFound: LOG.debug("No related subnet for port %s", port['id']) continue # Only configure if subnet has DHCP support if not subnet['enable_dhcp']: continue static_config['ipAddress'] = fixed_ip['ip_address'] # Set gateway for static binding static_config['defaultGateway'] = subnet['gateway_ip'] # set primary and secondary dns name_servers = [dns['address'] for dns in subnet['dns_nameservers']] # if no nameservers have been configured then use the ones # defined in the configuration name_servers = name_servers or cfg.CONF.nsxv.nameservers if len(name_servers) == 1: static_config['primaryNameServer'] = name_servers[0] elif len(name_servers) >= 2: static_config['primaryNameServer'] = name_servers[0] static_config['secondaryNameServer'] = name_servers[1] # Set search domain for static binding sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes( context.session, subnet_id) dns_search_domain = None if sub_binding and sub_binding.dns_search_domain: dns_search_domain = sub_binding.dns_search_domain elif cfg.CONF.nsxv.dns_search_domain: dns_search_domain = cfg.CONF.nsxv.dns_search_domain if dns_search_domain: static_config['domainName'] = dns_search_domain if sub_binding and sub_binding.dhcp_mtu: static_config = self.add_mtu_on_static_binding( static_config, sub_binding.dhcp_mtu) self.handle_meta_static_route( context, subnet_id, [static_config]) for host_route in subnet['routes']: self.add_host_route_on_static_bindings( [static_config], host_route['destination'], host_route['nexthop']) dhcp_opts = port.get(ext_edo.EXTRADHCPOPTS) if dhcp_opts is not None: for opt in dhcp_opts: self._add_dhcp_option(static_config, opt) static_bindings.append(static_config) return static_bindings def 
add_host_route_on_static_bindings(self, static_bindings, dest_cidr, nexthop): """Add one host route on a bulk of static bindings config. We can add host route on VM via dhcp option121. this func can only works at NSXv version 6.2.3 or higher. """ for binding in static_bindings: if 'dhcpOptions' not in six.iterkeys(binding): binding['dhcpOptions'] = {} if 'option121' not in six.iterkeys(binding['dhcpOptions']): binding['dhcpOptions']['option121'] = {'staticRoutes': []} binding_opt121 = binding['dhcpOptions']['option121'] if 'staticRoutes' not in six.iterkeys(binding_opt121): binding_opt121['staticRoutes'] = [] binding_opt121['staticRoutes'].append({ 'destinationSubnet': dest_cidr, 'router': nexthop}) return static_bindings def add_mtu_on_static_binding(self, static_binding, mtu): """Add the pre-configured MTU to a static binding config. We can add the MTU via dhcp option26. This func can only works at NSXv version 6.2.3 or higher. """ if 'dhcpOptions' not in six.iterkeys(static_binding): static_binding['dhcpOptions'] = {} static_binding['dhcpOptions']['option26'] = mtu return static_binding def handle_meta_static_route(self, context, subnet_id, static_bindings): is_dhcp_option121 = self.nsxv_plugin.is_dhcp_metadata(context, subnet_id) if is_dhcp_option121: dhcp_ip = self.nsxv_plugin._get_dhcp_ip_addr_from_subnet( context, subnet_id) if dhcp_ip: self.add_host_route_on_static_bindings( static_bindings, '169.254.169.254/32', dhcp_ip) else: LOG.error("Failed to find the dhcp port on subnet " "%s to do metadata host route insertion", subnet_id) def update_dhcp_service_config(self, context, edge_id): """Reconfigure the DHCP to the edge.""" # Get all networks attached to the edge edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, edge_id) dhcp_networks = [edge_vnic_binding.network_id for edge_vnic_binding in edge_vnic_bindings] subnets = self.nsxv_plugin.get_subnets( context.elevated(), filters={'network_id': dhcp_networks, 'enable_dhcp': 
[True]}) static_bindings = [] for subnet in subnets: ports = self.nsxv_plugin.get_ports( context.elevated(), filters={'network_id': [subnet['network_id']], 'fixed_ips': {'subnet_id': [subnet['id']]}}) inst_ports = [port for port in ports if port['device_owner'].startswith('compute')] for port in inst_ports: static_bindings.extend( self.create_static_binding( context.elevated(), port)) dhcp_request = { 'featureType': "dhcp_4.0", 'enabled': True, 'staticBindings': {'staticBindings': static_bindings}} self.nsxv_manager.vcns.reconfigure_dhcp_service( edge_id, dhcp_request) bindings_get = get_dhcp_binding_mappings(self.nsxv_manager, edge_id) # Refresh edge_dhcp_static_bindings attached to edge nsxv_db.clean_edge_dhcp_static_bindings_by_edge( context.session, edge_id) for mac_address, binding_id in bindings_get.items(): nsxv_db.create_edge_dhcp_static_binding(context.session, edge_id, mac_address, binding_id) def _get_random_available_edge(self, available_edge_ids): while available_edge_ids: # Randomly select an edge ID from the pool. new_id = random.choice(available_edge_ids) # Validate whether the edge exists on the backend. if not self.check_edge_active_at_backend(new_id): # Remove edge_id from available edges pool. available_edge_ids.remove(new_id) LOG.warning("Skipping edge: %s due to inactive status on " "the backend.", new_id) else: return new_id def _get_available_edges(self, context, network_id, conflicting_nets, availability_zone): if conflicting_nets is None: conflicting_nets = [] conflict_edge_ids = [] available_edge_ids = [] filters = {'availability_zone': [availability_zone.name]} router_bindings = nsxv_db.get_nsxv_router_bindings(context.session, filters=filters) all_dhcp_edges = {binding['router_id']: binding['edge_id'] for binding in router_bindings if (binding['router_id']. 
startswith(vcns_const.DHCP_EDGE_PREFIX) and binding['status'] == constants.ACTIVE)} # Special case if there is more than one subnet per exclusive DHCP # network if availability_zone.exclusive_dhcp_edge: router_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] edge_id = all_dhcp_edges.get(router_id) if edge_id: LOG.info("Reusing the same DHCP edge for network %s", network_id) available_edge_ids.append(edge_id) return (conflict_edge_ids, available_edge_ids) if all_dhcp_edges: for dhcp_edge_id in set(all_dhcp_edges.values()): edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, dhcp_edge_id) free_number = ((vcns_const.MAX_VNIC_NUM - 1) * vcns_const.MAX_TUNNEL_NUM - len(edge_vnic_bindings)) # metadata internal network will use one vnic or # exclusive_dhcp_edge is set for the AZ if (free_number <= (vcns_const.MAX_TUNNEL_NUM - 1) or availability_zone.exclusive_dhcp_edge): conflict_edge_ids.append(dhcp_edge_id) for net_id in conflicting_nets: router_id = (vcns_const.DHCP_EDGE_PREFIX + net_id)[:36] edge_id = all_dhcp_edges.get(router_id) if (edge_id and edge_id not in conflict_edge_ids): conflict_edge_ids.append(edge_id) for x in all_dhcp_edges.values(): if (x not in conflict_edge_ids and x not in available_edge_ids): available_edge_ids.append(x) return (conflict_edge_ids, available_edge_ids) def _get_used_edges(self, context, subnet, availability_zone): """Returns conflicting and available edges for the subnet.""" conflicting = self.plugin._get_conflicting_networks_for_subnet( context, subnet) return self._get_available_edges(context, subnet['network_id'], conflicting, availability_zone) def remove_network_from_dhcp_edge(self, context, network_id, edge_id): # If DHCP edge was created initially for this network, metadata port # Might use this network's DHCP router_id as device_id. 
Call the # following to validate this self.reconfigure_shared_edge_metadata_port( context, (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36]) old_binding = nsxv_db.get_edge_vnic_binding( context.session, edge_id, network_id) if not old_binding: LOG.error("Remove network %(id)s failed since no binding " "found on edge %(edge_id)s", {'id': network_id, 'edge_id': edge_id}) self._delete_dhcp_router_binding(context, network_id, edge_id) return old_vnic_index = old_binding['vnic_index'] old_tunnel_index = old_binding['tunnel_index'] # Cut off the port group/virtual wire connection nsxv_db.free_edge_vnic_by_network(context.session, edge_id, network_id) try: # update dhcp service config on edge_id self.update_dhcp_service_config(context, edge_id) except nsxapi_exc.VcnsApiException: LOG.exception('Failed to delete vnic %(vnic_index)d ' 'tunnel %(tunnel_index)d on edge %(edge_id)s', {'vnic_index': old_vnic_index, 'tunnel_index': old_tunnel_index, 'edge_id': edge_id}) self._mark_router_bindings_status_error( context, edge_id, error_reason="remove network from dhcp edge failure") except Exception: LOG.exception('Failed to delete vnic %(vnic_index)d ' 'tunnel %(tunnel_index)d on edge %(edge_id)s', {'vnic_index': old_vnic_index, 'tunnel_index': old_tunnel_index, 'edge_id': edge_id}) self._delete_dhcp_internal_interface(context, edge_id, old_vnic_index, old_tunnel_index, network_id) def reuse_existing_dhcp_edge(self, context, edge_id, resource_id, network_id, availability_zone): app_size = vcns_const.SERVICE_SIZE_MAPPING['dhcp'] # There may be edge cases when we are waiting for edges to deploy # and the underlying db session may hit a timeout. 
So this creates # a new session context = q_context.get_admin_context() nsxv_db.add_nsxv_router_binding( context.session, resource_id, edge_id, None, constants.ACTIVE, appliance_size=app_size, availability_zone=availability_zone.name) nsxv_db.allocate_edge_vnic_with_tunnel_index( context.session, edge_id, network_id, availability_zone.name) def reconfigure_shared_edge_metadata_port(self, context, org_router_id): if not self.plugin.metadata_proxy_handler: return org_binding = nsxv_db.get_nsxv_router_binding(context.session, org_router_id) if not org_binding: return az_name = org_binding['availability_zone'] int_net = nsxv_db.get_nsxv_internal_network( context.session, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE, az_name) if not int_net: return # Query the ports of this internal network internal_nets = [int_net['network_id']] ports = self.nsxv_plugin.get_ports( context, filters={'device_id': [org_router_id], 'network_id': internal_nets}) if not ports: LOG.debug('No metadata ports found for %s', org_router_id) return elif len(ports) > 1: LOG.debug('Expecting one metadata port for %s. 
Found %d ports', org_router_id, len(ports)) edge_id = org_binding['edge_id'] bindings = nsxv_db.get_nsxv_router_bindings( context.session, filters={'edge_id': [edge_id]}) for binding in bindings: if binding['router_id'] != org_router_id: for port in ports: self.plugin.update_port( context, port['id'], {'port': {'device_id': binding['router_id']}}) return def allocate_new_dhcp_edge(self, context, network_id, resource_id, availability_zone): self._allocate_dhcp_edge_appliance(context, resource_id, availability_zone) new_edge = nsxv_db.get_nsxv_router_binding(context.session, resource_id) nsxv_db.allocate_edge_vnic_with_tunnel_index( context.session, new_edge['edge_id'], network_id, availability_zone.name) return new_edge['edge_id'] def create_dhcp_edge_service(self, context, network_id, subnet): """ Create an edge if there is no available edge for dhcp service, Update an edge if there is available edge for dhcp service If new edge was allocated, return resource_id, else return None """ availability_zone = self.plugin.get_network_az_by_net_id( context, network_id) # Check if the network has one related dhcp edge resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] dhcp_edge_binding = nsxv_db.get_nsxv_router_binding(context.session, resource_id) allocate_new_edge = False with locking.LockManager.get_lock('nsx-dhcp-edge-pool'): (conflict_edge_ids, available_edge_ids) = self._get_used_edges(context, subnet, availability_zone) LOG.debug("The available edges %s, the conflict edges %s ", available_edge_ids, conflict_edge_ids) edge_id = None # Check if the network can stay on the existing DHCP edge if dhcp_edge_binding: edge_id = dhcp_edge_binding['edge_id'] LOG.debug("At present network %s is using edge %s", network_id, edge_id) with locking.LockManager.get_lock(str(edge_id)): # Delete the existing vnic interface if there is # an overlapping subnet or the binding is in ERROR status if (edge_id in conflict_edge_ids or dhcp_edge_binding['status'] == constants.ERROR): 
LOG.debug("Removing network %s from dhcp edge %s", network_id, edge_id) self.remove_network_from_dhcp_edge(context, network_id, edge_id) edge_id = None if not edge_id: #Attach the network to a new Edge and update vnic: #1. Find an available existing edge or create a new one #2. For the existing one, cut off the old port group # connection #3. Create the new port group connection to an existing one #4. Update the address groups to the vnic if available_edge_ids: new_id = self._get_random_available_edge( available_edge_ids) if new_id: LOG.debug("Select edge %s to support dhcp for " "network %s", new_id, network_id) self.reuse_existing_dhcp_edge( context, new_id, resource_id, network_id, availability_zone) else: allocate_new_edge = True else: allocate_new_edge = True if allocate_new_edge: self.allocate_new_dhcp_edge(context, network_id, resource_id, availability_zone) # If a new Edge was allocated, return resource_id return resource_id def update_dhcp_edge_service(self, context, network_id, address_groups=None): """Update the subnet to the dhcp edge vnic.""" if address_groups is None: address_groups = [] resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] edge_binding = nsxv_db.get_nsxv_router_binding(context.session, resource_id) if not edge_binding: LOG.warning('Edge binding does not exist for network %s', network_id) return dhcp_binding = nsxv_db.get_edge_vnic_binding(context.session, edge_binding['edge_id'], network_id) if dhcp_binding: edge_id = dhcp_binding['edge_id'] with locking.LockManager.get_lock(str(edge_id)): vnic_index = dhcp_binding['vnic_index'] tunnel_index = dhcp_binding['tunnel_index'] LOG.debug('Update the dhcp service for %s on vnic %d tunnel ' '%d', edge_id, vnic_index, tunnel_index) try: self._update_dhcp_internal_interface( context, edge_id, vnic_index, tunnel_index, network_id, address_groups) ports = self.nsxv_plugin.get_ports( context, filters={'network_id': [network_id]}) inst_ports = [port for port in ports if 
port['device_owner'].startswith( "compute")] if inst_ports: # update dhcp service config for the new added network self.update_dhcp_service_config(context, edge_id) except nsxapi_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception( 'Failed to update the dhcp service for ' '%(edge_id)s on vnic %(vnic_index)d ' 'tunnel %(tunnel_index)d', {'edge_id': edge_id, 'vnic_index': vnic_index, 'tunnel_index': tunnel_index}) self._mark_router_bindings_status_error( context, edge_id, error_reason="update dhcp edge service") except Exception: with excutils.save_and_reraise_exception(): LOG.exception( 'Failed to update the dhcp service for ' '%(edge_id)s on vnic %(vnic_index)d ' 'tunnel %(tunnel_index)d', {'edge_id': edge_id, 'vnic_index': vnic_index, 'tunnel_index': tunnel_index}) def delete_dhcp_edge_service(self, context, network_id): """Delete an edge for dhcp service.""" resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] edge_binding = nsxv_db.get_nsxv_router_binding(context.session, resource_id) if edge_binding: dhcp_binding = nsxv_db.get_edge_vnic_binding( context.session, edge_binding['edge_id'], network_id) if dhcp_binding: edge_id = dhcp_binding['edge_id'] with locking.LockManager.get_lock(str(edge_id)): vnic_index = dhcp_binding['vnic_index'] tunnel_index = dhcp_binding['tunnel_index'] LOG.debug("Delete the tunnel %d on vnic %d from DHCP Edge " "%s", tunnel_index, vnic_index, edge_id) nsxv_db.free_edge_vnic_by_network(context.session, edge_id, network_id) try: self._delete_dhcp_internal_interface(context, edge_id, vnic_index, tunnel_index, network_id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to delete the tunnel ' '%(tunnel_index)d on vnic ' '%(vnic_index)d' 'from DHCP Edge %(edge_id)s', {'tunnel_index': tunnel_index, 'vnic_index': vnic_index, 'edge_id': edge_id}) def _update_address_in_dict(self, address_groups, old_ip, new_ip, subnet_mask): """Update the address_groups data structure to 
replace the old ip with a new one. If the old ip is None - if the ip matches an existing subnet: add it as a secondary ip. else - add a new address group for the new ip If the new ip is none - delete the primary/secondary entry with the old ip. If the old ip was not found - return False Otherwise - return True """ if old_ip is None: # Adding a new IP # look for an address group with a primary ip in the same subnet # as the new ip for address_group in address_groups['addressGroups']: if (netaddr.IPAddress(new_ip) in netaddr.IPNetwork(address_group['primaryAddress'] + '/' + address_group['subnetPrefixLength'])): # we should add the new ip as a secondary address in this # address group if (address_group.get('secondaryAddresses') is not None): secondary = address_group['secondaryAddresses'] secondary['ipAddress'].append(new_ip) else: address_group['secondaryAddresses'] = { 'type': 'secondary_addresses', 'ipAddress': [new_ip]} return True # Could not find the same subnet - add a new address group address_group = { 'primaryAddress': new_ip, 'subnetMask': subnet_mask } address_groups['addressGroups'].append(address_group) return True else: for ind, address_group in enumerate( address_groups['addressGroups']): if address_group['primaryAddress'] == old_ip: # this is the one we should update if new_ip: address_group['primaryAddress'] = new_ip else: # delete this entry address_groups['addressGroups'].pop(ind) return True # try to find a match in the secondary ips if (address_group.get('secondaryAddresses') is not None): secondary = address_group['secondaryAddresses'] secondary_ips = secondary['ipAddress'] if old_ip in secondary_ips: # We should update the secondary addresses if new_ip: # replace the old with the new secondary_ips.remove(old_ip) secondary_ips.append(new_ip) else: # delete this entry if len(secondary_ips) == 1: # delete the whole structure del address_group['secondaryAddresses'] else: secondary_ips.remove(old_ip) return True # The old ip was not found return 
False def update_interface_addr(self, context, edge_id, old_ip, new_ip, subnet_mask, is_uplink=False): with locking.LockManager.get_lock(edge_id): # get the current interfaces configuration r = self.nsxv_manager.vcns.get_interfaces(edge_id)[1] vnics = r.get('vnics', []) # Go over the vnics to find the one we should update for vnic in vnics: if ((is_uplink and vnic['type'] == 'uplink') or not is_uplink and vnic['type'] != 'uplink'): if self._update_address_in_dict( vnic['addressGroups'], old_ip, new_ip, subnet_mask): self.nsxv_manager.vcns.update_interface(edge_id, vnic) return # If we got here - we didn't find the old ip: error = (_("Failed to update interface ip " "on edge %(eid)s: Cannot find the previous ip %(ip)s") % {'eid': edge_id, 'ip': old_ip}) raise nsx_exc.NsxPluginException(err_msg=error) def update_vdr_interface_addr(self, context, edge_id, vnic_index, old_ip, new_ip, subnet_mask): with locking.LockManager.get_lock(edge_id): # get the current interfaces configuration vnic = self.nsxv_manager.vcns.get_vdr_internal_interface( edge_id, vnic_index)[1] if self._update_address_in_dict( vnic['addressGroups'], old_ip, new_ip, subnet_mask): interface_req = {'interface': vnic} self.nsxv_manager.vcns.update_vdr_internal_interface( edge_id, vnic_index, interface_req) return # If we got here - we didn't find the old ip: error = (_("Failed to update VDR interface ip " "on edge %(eid)s: Cannot find the previous ip %(ip)s") % {'eid': edge_id, 'ip': old_ip}) raise nsx_exc.NsxPluginException(err_msg=error) def get_plr_by_tlr_id(self, context, router_id): lswitch_id = nsxv_db.get_nsxv_router_binding( context.session, router_id).lswitch_id if lswitch_id: edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_int_lswitch( context.session, lswitch_id) if edge_vnic_bindings: for edge_vnic_binding in edge_vnic_bindings: plr_router_id = nsxv_db.get_nsxv_router_bindings_by_edge( context.session, edge_vnic_binding.edge_id)[0].router_id if plr_router_id != router_id: return 
plr_router_id def create_plr_with_tlr_id(self, context, router_id, router_name, availability_zone): # Add an internal network preparing for connecting the VDR # to a PLR tlr_edge_id = nsxv_db.get_nsxv_router_binding( context.session, router_id).edge_id # First create an internal lswitch lswitch_name = ('int-' + router_name + router_id)[:36] virtual_wire = {"name": lswitch_name, "tenantId": "virtual wire tenant"} config_spec = {"virtualWireCreateSpec": virtual_wire} vdn_scope_id = availability_zone.vdn_scope_id h, lswitch_id = self.nsxv_manager.vcns.create_virtual_wire( vdn_scope_id, config_spec) # add vdr's external interface to the lswitch tlr_vnic_index = self.nsxv_manager.add_vdr_internal_interface( tlr_edge_id, lswitch_id, address=get_vdr_transit_network_tlr_address(), netmask=get_vdr_transit_network_netmask(), type="uplink") nsxv_db.create_edge_vnic_binding( context.session, tlr_edge_id, tlr_vnic_index, lswitch_id) # store the lswitch_id into nsxv_router_binding nsxv_db.update_nsxv_router_binding( context.session, router_id, lswitch_id=lswitch_id) # Handle plr relative op plr_router = {'name': router_name, 'id': (vcns_const.PLR_EDGE_PREFIX + _uuid())[:36]} self.create_lrouter( context, plr_router, availability_zone=availability_zone, appliance_size=cfg.CONF.nsxv.exclusive_router_appliance_size) binding = nsxv_db.get_nsxv_router_binding( context.session, plr_router['id']) plr_edge_id = binding['edge_id'] plr_vnic_index = nsxv_db.allocate_edge_vnic( context.session, plr_edge_id, lswitch_id).vnic_index #TODO(berlin): the internal ip should change based on vnic_index self.nsxv_manager.update_interface( plr_router['id'], plr_edge_id, plr_vnic_index, lswitch_id, address=get_vdr_transit_network_plr_address(), netmask=get_vdr_transit_network_netmask()) return plr_router['id'] def delete_plr_by_tlr_id(self, context, plr_id, router_id): # Delete plr's internal interface which connects to internal switch tlr_binding = nsxv_db.get_nsxv_router_binding( context.session, 
router_id) lswitch_id = tlr_binding.lswitch_id tlr_edge_id = tlr_binding.edge_id router_binding = nsxv_db.get_nsxv_router_binding( context.session, plr_id) if router_binding is None: LOG.error("Router binding not found for router: %s", router_id) else: plr_edge_id = router_binding.edge_id vnic_binding = nsxv_db.get_edge_vnic_binding( context.session, plr_edge_id, lswitch_id) if vnic_binding is None: LOG.error("Vnic binding not found for router: %s", router_id) else: # Clear static routes before delete internal vnic self.nsxv_manager.update_routes(plr_edge_id, None, []) # Delete internal vnic self.nsxv_manager.delete_interface(plr_id, plr_edge_id, vnic_binding.vnic_index) nsxv_db.free_edge_vnic_by_network( context.session, plr_edge_id, lswitch_id) # Delete the PLR self.delete_lrouter(context, plr_id) # Clear static routes of vdr self.nsxv_manager.update_routes(tlr_edge_id, None, []) #First delete the vdr's external interface tlr_vnic_binding = nsxv_db.get_edge_vnic_binding( context.session, tlr_edge_id, lswitch_id) if tlr_vnic_binding is None: LOG.error("Vnic binding not found for router: %s", router_id) else: self.nsxv_manager.delete_vdr_internal_interface( tlr_edge_id, tlr_vnic_binding.vnic_index) nsxv_db.delete_edge_vnic_binding_by_network( context.session, tlr_edge_id, lswitch_id) try: # Then delete the internal lswitch self.nsxv_manager.delete_virtual_wire(lswitch_id) except Exception: LOG.warning("Failed to delete virtual wire: %s", lswitch_id) def get_routers_on_edge(self, context, edge_id): router_ids = [] valid_router_ids = [] if edge_id: router_ids = [ binding['router_id'] for binding in nsxv_db.get_nsxv_router_bindings_by_edge( context.session, edge_id)] if router_ids: valid_router_ids = self.plugin.get_routers( context.elevated(), filters={'id': router_ids}, fields=['id']) valid_router_ids = [ele['id'] for ele in valid_router_ids] if set(valid_router_ids) != set(router_ids): LOG.error("Get invalid router bindings with " "router ids: %s", 
str(set(router_ids) - set(valid_router_ids))) return valid_router_ids def get_routers_on_same_edge(self, context, router_id): edge_binding = nsxv_db.get_nsxv_router_binding( context.session, router_id) if edge_binding: return self.get_routers_on_edge(context, edge_binding['edge_id']) return [] def bind_router_on_available_edge( self, context, target_router_id, optional_router_ids, conflict_router_ids, conflict_network_ids, network_number, availability_zone): """Bind logical shared router on an available edge. Return True if the logical router is bound to a new edge. """ with locking.LockManager.get_lock('nsx-edge-router'): optional_edge_ids = [] conflict_edge_ids = [] for router_id in optional_router_ids: binding = nsxv_db.get_nsxv_router_binding( context.session, router_id) if (binding and binding.status == constants.ACTIVE and binding.availability_zone == availability_zone.name and binding.edge_id not in optional_edge_ids): optional_edge_ids.append(binding.edge_id) for router_id in conflict_router_ids: binding = nsxv_db.get_nsxv_router_binding( context.session, router_id) if binding and binding.edge_id not in conflict_edge_ids: conflict_edge_ids.append(binding.edge_id) optional_edge_ids = list( set(optional_edge_ids) - set(conflict_edge_ids)) max_net_number = 0 available_edge_id = None for edge_id in optional_edge_ids: edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, edge_id) # one vnic is used to provide external access. 
net_number = ( vcns_const.MAX_VNIC_NUM - len(edge_vnic_bindings) - 1) if (net_number > max_net_number and net_number >= network_number): net_ids = [vnic_binding.network_id for vnic_binding in edge_vnic_bindings] if not (set(conflict_network_ids) & set(net_ids)): max_net_number = net_number available_edge_id = edge_id else: # TODO(yangyu): Remove conflict_network_ids LOG.warning( "Failed to query conflict_router_ids") if available_edge_id: edge_binding = nsxv_db.get_nsxv_router_bindings_by_edge( context.session, available_edge_id)[0] nsxv_db.add_nsxv_router_binding( context.session, target_router_id, edge_binding.edge_id, None, edge_binding.status, edge_binding.appliance_size, edge_binding.edge_type, availability_zone=availability_zone.name) else: router_name = ('shared' + '-' + _uuid())[ :vcns_const.EDGE_NAME_LEN] self._allocate_edge_appliance( context, target_router_id, router_name, appliance_size=cfg.CONF.nsxv.shared_router_appliance_size, availability_zone=availability_zone) return True def unbind_router_on_edge(self, context, router_id): """Unbind a logical router from edge. Return True if no logical router bound to the edge. 
""" with locking.LockManager.get_lock('nsx-edge-router'): # free edge if no other routers bound to the edge router_ids = self.get_routers_on_same_edge(context, router_id) if router_ids == [router_id]: self._free_edge_appliance(context, router_id) return True else: nsxv_db.delete_nsxv_router_binding(context.session, router_id) def is_router_conflict_on_edge(self, context, router_id, conflict_router_ids, conflict_network_ids, intf_num=0): with locking.LockManager.get_lock('nsx-edge-router'): router_ids = self.get_routers_on_same_edge(context, router_id) if set(router_ids) & set(conflict_router_ids): return True router_binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, router_binding.edge_id) if (vcns_const.MAX_VNIC_NUM - len(edge_vnic_bindings ) - 1 < intf_num): LOG.debug("There isn't available edge vnic for the router: %s", router_id) return True for binding in edge_vnic_bindings: if binding.network_id in conflict_network_ids: return True return False def delete_dhcp_binding(self, context, port_id, network_id, mac_address): edge_id = get_dhcp_edge_id(context, network_id) if edge_id: dhcp_binding = nsxv_db.get_edge_dhcp_static_binding( context.session, edge_id, mac_address) if dhcp_binding: with locking.LockManager.get_lock(str(edge_id)): # We need to read the binding from the NSX to check that # we are not deleting a updated entry. 
This may be the # result of a async nova create and nova delete and the # same port IP is selected binding = get_dhcp_binding_for_binding_id( self.nsxv_manager, edge_id, dhcp_binding.binding_id) # The hostname is the port_id so we have a unique # identifier if binding and binding['hostname'] == port_id: self.nsxv_manager.vcns.delete_dhcp_binding( edge_id, dhcp_binding.binding_id) else: LOG.warning("Failed to find binding on edge " "%(edge_id)s for port " "%(port_id)s with %(binding_id)s", {'edge_id': edge_id, 'port_id': port_id, 'binding_id': dhcp_binding.binding_id}) nsxv_db.delete_edge_dhcp_static_binding( context.session, edge_id, mac_address) else: LOG.warning("Failed to find dhcp binding on edge " "%(edge_id)s to DELETE for port " "%(port_id)s", {'edge_id': edge_id, 'port_id': port_id}) else: # This happens during network/subnet deletion LOG.info("Didn't delete dhcp binding for port %(port_id)s: " "No edge id", {'port_id': port_id}) @vcns.retry_upon_exception(nsxapi_exc.VcnsApiException, max_delay=10) def _create_dhcp_binding(self, context, edge_id, binding): try: h, c = self.nsxv_manager.vcns.create_dhcp_binding( edge_id, binding) binding_id = h['location'].split('/')[-1] nsxv_db.create_edge_dhcp_static_binding( context.session, edge_id, binding['macAddress'], binding_id) except nsxapi_exc.VcnsApiException as e: with excutils.save_and_reraise_exception(): binding_id = None desc = jsonutils.loads(e.response) if desc.get('errorCode') == ( vcns_const.NSX_ERROR_DHCP_DUPLICATE_MAC): bindings = get_dhcp_binding_mappings(self.nsxv_manager, edge_id) binding_id = bindings.get(binding['macAddress'].lower()) LOG.debug("Duplicate MAC for %s with binding %s", binding['macAddress'], binding_id) elif desc.get('errorCode') == ( vcns_const.NSX_ERROR_DHCP_OVERLAPPING_IP): bindings = get_dhcp_binding_mappings_for_ips( self.nsxv_manager, edge_id) binding_id = bindings.get(binding['ipAddress']) LOG.debug("Overlapping IP %s with binding %s", binding['ipAddress'], binding_id) elif 
desc.get('errorCode') == ( vcns_const.NSX_ERROR_DHCP_DUPLICATE_HOSTNAME): bindings = get_dhcp_binding_mappings_for_hostname( self.nsxv_manager, edge_id) binding_id = bindings.get(binding['hostname']) LOG.debug("Overlapping hostname %s with binding %s", binding['hostname'], binding_id) if binding_id: self.nsxv_manager.vcns.delete_dhcp_binding( edge_id, binding_id) nsxv_db.delete_edge_dhcp_static_binding_id( context.session, edge_id, binding_id) return binding_id def create_dhcp_bindings(self, context, port_id, network_id, bindings): edge_id = get_dhcp_edge_id(context, network_id) if edge_id: # Check port is still there try: # Reload port db info context.session.expire_all() self.plugin.get_port(context, port_id) except n_exc.PortNotFound: LOG.warning( "port %(port_id)s is deleted, so we would pass " "creating dhcp binding on edge %(edge_id)s", {'port_id': port_id, 'edge_id': edge_id}) return configured_bindings = [] try: for binding in bindings: with locking.LockManager.get_lock(str(edge_id)): binding_id = self._create_dhcp_binding( context, edge_id, binding) configured_bindings.append((binding_id, binding['macAddress'])) except nsxapi_exc.VcnsApiException: with excutils.save_and_reraise_exception(): for binding_id, mac_address in configured_bindings: with locking.LockManager.get_lock(str(edge_id)): self.nsxv_manager.vcns.delete_dhcp_binding( edge_id, binding_id) nsxv_db.delete_edge_dhcp_static_binding( context.session, edge_id, mac_address) else: LOG.warning("Failed to create dhcp bindings since dhcp edge " "for net %s not found at the backend", network_id) def _get_syslog_config_from_flavor(self, context, router_id, flavor_id): if not validators.is_attr_set(flavor_id): return metainfo = self.plugin.get_flavor_metainfo(context, flavor_id) return metainfo.get('syslog') def update_external_interface( self, nsxv_manager, context, router_id, ext_net_id, ipaddr, netmask, secondary=None): secondary = secondary or [] binding = 
nsxv_db.get_nsxv_router_binding(context.session, router_id) # If no binding was found, no interface to update - exit if not binding: LOG.error('Edge binding not found for router %s', router_id) return net_bindings = nsxv_db.get_network_bindings( context.session, ext_net_id) if not net_bindings: az_name = binding.availability_zone az = self._availability_zones.get_availability_zone(az_name) vcns_network_id = az.external_network else: vcns_network_id = net_bindings[0].phy_uuid # reorganize external vnic's address groups if netmask: address_groups = [] addr_list = [] for str_cidr in netmask: ip_net = netaddr.IPNetwork(str_cidr) address_group = {'primaryAddress': None, 'subnetPrefixLength': str(ip_net.prefixlen)} if (ipaddr not in addr_list and _check_ipnet_ip(ip_net, ipaddr)): address_group['primaryAddress'] = ipaddr addr_list.append(ipaddr) for sec_ip in secondary: if (sec_ip not in addr_list and _check_ipnet_ip(ip_net, sec_ip)): if not address_group['primaryAddress']: address_group['primaryAddress'] = sec_ip else: if not address_group.get('secondaryAddresses'): address_group['secondaryAddresses'] = { 'ipAddress': [sec_ip], 'type': 'secondary_addresses'} else: address_group['secondaryAddresses'][ 'ipAddress'].append(sec_ip) addr_list.append(sec_ip) if address_group['primaryAddress']: address_groups.append(address_group) if ipaddr not in addr_list: LOG.error("primary address %s of ext vnic is not " "configured", ipaddr) if secondary: missed_ip_sec = set(secondary) - set(addr_list) if missed_ip_sec: LOG.error("secondary address %s of ext vnic are not " "configured", str(missed_ip_sec)) nsxv_manager.update_interface(router_id, binding['edge_id'], vcns_const.EXTERNAL_VNIC_INDEX, vcns_network_id, address_groups=address_groups) else: nsxv_manager.update_interface(router_id, binding['edge_id'], vcns_const.EXTERNAL_VNIC_INDEX, vcns_network_id, address=ipaddr, netmask=netmask, secondary=secondary) def create_lrouter(nsxv_manager, context, lrouter, lswitch=None, dist=False, 
availability_zone=None): """Create an edge for logical router support.""" router_id = lrouter['id'] router_name = lrouter['name'] + '-' + router_id appliance_size = vcns_const.SERVICE_SIZE_MAPPING['router'] # store router-edge mapping binding nsxv_db.add_nsxv_router_binding( context.session, router_id, None, None, constants.PENDING_CREATE, appliance_size=appliance_size, availability_zone=availability_zone.name) # deploy edge nsxv_manager.deploy_edge( context, router_id, router_name, internal_network=None, dist=dist, appliance_size=appliance_size, availability_zone=availability_zone) def delete_lrouter(nsxv_manager, context, router_id, dist=False): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if binding: nsxv_db.update_nsxv_router_binding( context.session, router_id, status=constants.PENDING_DELETE) edge_id = binding['edge_id'] # delete edge nsxv_manager.delete_edge(context, router_id, edge_id, dist=dist) else: LOG.warning("router binding for router: %s not found", router_id) def remove_irrelevant_keys_from_edge_request(edge_request): """Remove some unnecessary keys from the edge request. Having these keys fail the update edge NSX transaction """ for key in ['status', 'datacenterMoid', 'fqdn', 'version', 'tenant', 'datacenterName', 'hypervisorAssist', 'universal', 'enableFips']: edge_request.pop(key, None) def _retrieve_nsx_switch_id(context, network_id, az_name): """Helper method to retrieve backend switch ID.""" bindings = nsxv_db.get_network_bindings(context.session, network_id) if bindings: binding = bindings[0] network_type = binding['binding_type'] if (network_type == c_utils.NsxVNetworkTypes.VLAN and binding['phy_uuid'] != ''): if ',' not in binding['phy_uuid']: dvs_id = binding['phy_uuid'] else: # If network is of type VLAN and multiple dvs associated with # one neutron network, retrieve the logical network id for the # edge/mgmt cluster's DVS from the networks availability zone. 
azs = nsx_az.NsxVAvailabilityZones() az = azs.get_availability_zone(az_name) dvs_id = az.dvs_id return nsx_db.get_nsx_switch_id_for_dvs( context.session, network_id, dvs_id) # Get the physical port group /wire id of the network id mappings = nsx_db.get_nsx_switch_ids(context.session, network_id) if mappings: return mappings[0] raise nsx_exc.NsxPluginException( err_msg=_("Network %s not found at the backend") % network_id) def get_dhcp_edge_id(context, network_id): # Query edge id resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] binding = nsxv_db.get_nsxv_router_binding(context.session, resource_id) if binding: edge_id = binding['edge_id'] return edge_id def get_dhcp_binding_mappings(nsxv_manager, edge_id): dhcp_config = query_dhcp_service_config(nsxv_manager, edge_id) bindings_get = {} if dhcp_config: for binding in dhcp_config['staticBindings']['staticBindings']: bindings_get[binding['macAddress'].lower()] = binding['bindingId'] return bindings_get def get_dhcp_binding_mappings_for_ips(nsxv_manager, edge_id): dhcp_config = query_dhcp_service_config(nsxv_manager, edge_id) bindings_get = {} if dhcp_config: for binding in dhcp_config['staticBindings']['staticBindings']: bindings_get[binding['ipAddress']] = binding['bindingId'] return bindings_get def get_dhcp_binding_mappings_for_hostname(nsxv_manager, edge_id): dhcp_config = query_dhcp_service_config(nsxv_manager, edge_id) bindings_get = {} if dhcp_config: for binding in dhcp_config['staticBindings']['staticBindings']: bindings_get[binding['hostname']] = binding['bindingId'] return bindings_get def _get_dhcp_binding_for_binding_id(nsxv_manager, edge_id, binding_id): dhcp_config = query_dhcp_service_config(nsxv_manager, edge_id) if dhcp_config: for binding in dhcp_config['staticBindings']['staticBindings']: if binding['bindingId'] == binding_id: return binding def _get_dhcp_binding(nsxv_manager, edge_id, binding_id): try: h, dhcp_binding = nsxv_manager.vcns.get_dhcp_binding(edge_id, binding_id) return 
dhcp_binding
    except Exception:
        # Binding does not exist on the edge (or API error) - treat as absent
        return


def get_dhcp_binding_for_binding_id(nsxv_manager, edge_id, binding_id):
    """Fetch a single DHCP static binding from the edge by its binding id.

    API for specific binding is supported in NSX 6.2.8 and 6.3.3 onwards.
    On older NSX versions fall back to the module-level helper
    _get_dhcp_binding_for_binding_id (note the leading underscore), which
    scans the full DHCP configuration instead.
    """
    ver = nsxv_manager.vcns.get_version()
    if c_utils.is_nsxv_dhcp_binding_supported(ver):
        return _get_dhcp_binding(nsxv_manager, edge_id, binding_id)
    else:
        return _get_dhcp_binding_for_binding_id(nsxv_manager, edge_id,
                                                binding_id)


def query_dhcp_service_config(nsxv_manager, edge_id):
    """Retrieve the current DHCP configuration from the edge."""
    _, dhcp_config = nsxv_manager.vcns.query_dhcp_configuration(edge_id)
    return dhcp_config


def get_router_edge_id(context, router_id):
    """Return the edge id bound to the router, or None if no binding."""
    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
    if binding:
        return binding['edge_id']


def update_gateway(nsxv_manager, context, router_id, nexthop, routes=None):
    """Set the default gateway (nexthop) and static routes on the edge.

    Assumes a router binding exists; raises TypeError otherwise
    (binding is not checked here) -- NOTE(review): confirm callers
    guarantee the binding.
    """
    binding = nsxv_db.get_nsxv_router_binding(context.session,
                                              router_id)
    edge_id = binding['edge_id']
    if routes is None:
        routes = []
    nsxv_manager.update_routes(edge_id, nexthop, routes)


def get_routes(edge_manager, context, router_id):
    """Read static routes from the backend edge of the given router.

    Returns a list of dicts with 'network_id', 'nexthop' and 'destination',
    translating each backend route's vnic index back to the neutron network
    via the edge vnic bindings. Returns [] when the router or vnic bindings
    are missing.
    """

    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
    if not binding:
        LOG.error('Router binding not found for router %s', router_id)
        return []
    edge_id = binding['edge_id']

    vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge(context.session,
                                                           edge_id)
    if not vnic_bindings:
        LOG.error('vNic binding not found for edge %s', edge_id)
        return []

    h, routes = edge_manager.vcns.get_routes(edge_id)
    edge_routes = routes.get('staticRoutes')
    routes = []
    for edge_route in edge_routes.get('staticRoutes'):
        # Map the route's vnic index to its neutron network id; routes on
        # vnics without a binding are silently dropped from the result.
        for vnic_binding in vnic_bindings:
            if vnic_binding['vnic_index'] == int(edge_route['vnic']):
                route = {'network_id': vnic_binding['network_id'],
                         'nexthop': edge_route['nextHop'],
                         'destination': edge_route['network']}
                routes.append(route)
                break
    return routes


def update_routes(edge_manager, context, router_id, routes, nexthop=None):
    """Push neutron static routes (and optional default nexthop) to the edge.

    Each route dict needs 'network_id', 'destination' and 'nexthop';
    'external' marks routes that go out via the external vnic.
    """
    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
    if not
binding:
        LOG.error('Router binding not found for router %s', router_id)
        return
    edge_id = binding['edge_id']
    edge_routes = []
    for route in routes:
        # Routes without a network id cannot be mapped to a vnic - skip.
        if not route.get('network_id'):
            LOG.warning("There is no network info for the route %s, so "
                        "the route entry would not be executed!", route)
            continue
        if route.get('external'):
            edge_routes.append({
                'vnic_index': vcns_const.EXTERNAL_VNIC_INDEX,
                'cidr': route['destination'],
                'nexthop': route['nexthop']})
        else:
            vnic_binding = nsxv_db.get_edge_vnic_binding(
                context.session, edge_id, route['network_id'])
            if (netaddr.IPAddress(route['nexthop']) in
                netaddr.IPNetwork(route['destination'])):
                # check that the nexthop is not in the destination
                LOG.error("Cannot add route with nexthop %(nexthop)s "
                          "contained in the destination: %(dest)s.",
                          {'dest': route['destination'],
                           'nexthop': route['nexthop']})
                continue
            if vnic_binding and vnic_binding.get('vnic_index'):
                edge_routes.append({
                    'vnic_index': vnic_binding['vnic_index'],
                    'cidr': route['destination'],
                    'nexthop': route['nexthop']})
            else:
                LOG.error("vnic binding on edge %(edge_id)s for network "
                          "%(net_id)s not found, so route: destination: "
                          "%(dest)s, nexthop: %(nexthop)s can't be "
                          "applied!",
                          {'edge_id': edge_id,
                           'net_id': route['network_id'],
                           'dest': route['destination'],
                           'nexthop': route['nexthop']})
    edge_manager.update_routes(edge_id, nexthop, edge_routes)


def get_internal_lswitch_id_of_plr_tlr(context, router_id):
    """Return the internal lswitch id stored on the TLR router binding."""
    return nsxv_db.get_nsxv_router_binding(
        context.session, router_id).lswitch_id


def get_internal_vnic_index_of_plr_tlr(context, router_id):
    """Return the vnic index connecting the router's edge to its lswitch."""
    router_binding = nsxv_db.get_nsxv_router_binding(
        context.session, router_id)
    edge_vnic_binding = nsxv_db.get_edge_vnic_binding(
        context.session, router_binding.edge_id, router_binding.lswitch_id)
    return edge_vnic_binding.vnic_index


def clear_gateway(nsxv_manager, context, router_id):
    """Remove the default gateway and all static routes from the edge."""
    return update_gateway(nsxv_manager, context, router_id, None)


def _check_ipnet_ip(ipnet, ip_address):
    """Check one ip is valid ip from ipnet.

    Valid means the address lies inside ipnet and is neither the netmask
    value nor the last (broadcast) address of the network.
    """
    ip =
netaddr.IPAddress(ip_address)
    if (ip != ipnet.netmask and
        ip != ipnet[-1] and
        ipnet.netmask & ip == ipnet.network):
        return True
    return False


def update_internal_interface(nsxv_manager, context, router_id, int_net_id,
                              address_groups, is_connected=True):
    """Attach/update an internal interface of the router's edge.

    Allocates an edge vnic for the network on first use, then pushes the
    given address groups to the backend interface.
    """
    # Get edge id
    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
    edge_id = binding['edge_id']
    # Get the pg/wire id of the network id
    az_name = binding['availability_zone']
    vcns_network_id = _retrieve_nsx_switch_id(context, int_net_id, az_name)
    LOG.debug("Network id %(network_id)s corresponding ref is : "
              "%(net_moref)s", {'network_id': int_net_id,
                                'net_moref': vcns_network_id})

    edge_vnic_binding = nsxv_db.get_edge_vnic_binding(
        context.session, edge_id, int_net_id)
    # if edge_vnic_binding is None, then first select one available
    # internal vnic for connection.
    if not edge_vnic_binding:
        edge_vnic_binding = nsxv_db.allocate_edge_vnic(
            context.session, edge_id, int_net_id)

    nsxv_manager.update_interface(router_id, edge_id,
                                  edge_vnic_binding.vnic_index,
                                  vcns_network_id,
                                  is_connected=is_connected,
                                  address_groups=address_groups)


def add_vdr_internal_interface(nsxv_manager, context,
                               router_id, int_net_id,
                               address_groups, is_connected=True):
    """Add an internal interface on a distributed (VDR) edge.

    Raises BadRequest if the network is already attached: a VDR does not
    support multiple subnets of the same network on one interface.
    """
    # Get edge id
    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
    edge_id = binding['edge_id']
    # Get the pg/wire id of the network id
    az_name = binding['availability_zone']
    vcns_network_id = _retrieve_nsx_switch_id(context, int_net_id, az_name)
    LOG.debug("Network id %(network_id)s corresponding ref is : "
              "%(net_moref)s", {'network_id': int_net_id,
                                'net_moref': vcns_network_id})
    edge_vnic_binding = nsxv_db.get_edge_vnic_binding(
        context.session, edge_id, int_net_id)
    if not edge_vnic_binding:
        # The backend chooses the vnic index; record it in the DB binding.
        vnic_index = nsxv_manager.add_vdr_internal_interface(
            edge_id, vcns_network_id, address_groups=address_groups,
            is_connected=is_connected)
        nsxv_db.create_edge_vnic_binding(
            context.session, edge_id, vnic_index, int_net_id)
    else:
        msg = (_("Distributed Router doesn't support multiple subnets "
                 "with same network attached to it."))
        raise n_exc.BadRequest(resource='vdr', msg=msg)


def update_vdr_internal_interface(nsxv_manager, context, router_id,
                                  int_net_id, address_groups,
                                  is_connected=True):
    """Update address groups of an existing VDR internal interface.

    Assumes the edge vnic binding for the network already exists
    (raises AttributeError otherwise).
    """
    # Get edge id
    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
    edge_id = binding['edge_id']
    # Get the pg/wire id of the network id
    az_name = binding['availability_zone']
    vcns_network_id = _retrieve_nsx_switch_id(context, int_net_id, az_name)
    LOG.debug("Network id %(network_id)s corresponding ref is : "
              "%(net_moref)s", {'network_id': int_net_id,
                                'net_moref': vcns_network_id})
    edge_vnic_binding = nsxv_db.get_edge_vnic_binding(
        context.session, edge_id, int_net_id)
    nsxv_manager.update_vdr_internal_interface(
        edge_id, edge_vnic_binding.vnic_index, vcns_network_id,
        address_groups=address_groups, is_connected=is_connected)


def delete_interface(nsxv_manager, context, router_id, network_id, dist=False):
    """Detach a network from the router's edge and free its vnic binding.

    dist selects the distributed-router (VDR) deletion path. Missing
    router/vnic bindings are logged and tolerated (no exception).
    """
    # Get edge id
    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
    if not binding:
        LOG.warning("Failed to find the router binding for router %s",
                    router_id)
        return

    edge_id = binding['edge_id']
    # Get the pg/wire id of the network id
    az_name = binding['availability_zone']
    vcns_network_id = _retrieve_nsx_switch_id(context, network_id, az_name)
    LOG.debug("Network id %(network_id)s corresponding ref is : "
              "%(net_moref)s", {'network_id': network_id,
                                'net_moref': vcns_network_id})

    edge_vnic_binding = nsxv_db.get_edge_vnic_binding(
        context.session, edge_id, network_id)
    if not edge_vnic_binding:
        LOG.warning("Failed to find the network %(net_id)s "
                    "corresponding vnic index on edge %(edge_id)s",
                    {'net_id': network_id,
                     'edge_id': edge_id})
        return
    if not dist:
        nsxv_manager.delete_interface(
            router_id, edge_id, edge_vnic_binding.vnic_index)
        nsxv_db.free_edge_vnic_by_network(
            context.session, edge_id, network_id)
    else:
        nsxv_manager.delete_vdr_internal_interface(
            edge_id, edge_vnic_binding.vnic_index)
        nsxv_db.delete_edge_vnic_binding_by_network(
            context.session, edge_id, network_id)


def update_nat_rules(nsxv_manager, context, router_id, snat, dnat, az=None):
    """Replace the SNAT/DNAT rules on the router's edge.

    When the availability zone binds floating IPs to all interfaces and
    NSX >= 6.2.4, indices stays None (backend binds to all vnics); on
    older NSX an explicit index list of every vnic is built. Otherwise
    rules are bound to the external vnic only.
    """
    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
    if binding:
        if not az:
            azs = nsx_az.NsxVAvailabilityZones()
            az = azs.get_availability_zone(binding['availability_zone'])
        bind_to_all = az.bind_floatingip_to_all_interfaces
        indices = None
        if bind_to_all:
            # from 6.2.4 onwards, unspecified vnic will result
            # in binding the rule to all interfaces
            ver = nsxv_manager.vcns.get_version()
            if version.LooseVersion(ver) < version.LooseVersion('6.2.4'):
                LOG.debug("NSX version %s requires explicit nat rule "
                          "for each interface", ver)
                edge_id = binding['edge_id']
                vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge(
                    context.session, edge_id)
                indices = [vnic_binding.vnic_index
                           for vnic_binding in vnic_bindings]
                indices.append(vcns_const.EXTERNAL_VNIC_INDEX)
        else:
            LOG.debug("Configuring nat rules on external "
                      "interface only for %s", router_id)
            indices = [vcns_const.EXTERNAL_VNIC_INDEX]
        nsxv_manager.update_nat_rules(binding['edge_id'], snat, dnat, indices)
    else:
        LOG.warning("Bindings do not exists for %s", router_id)


def clear_nat_rules(nsxv_manager, context, router_id):
    """Remove all SNAT/DNAT rules from the router's edge."""
    update_nat_rules(nsxv_manager, context, router_id, [], [])


def update_firewall(nsxv_manager, context, router_id, firewall,
                    allow_external=True):
    """Push the given firewall configuration to the router's edge."""
    binding = nsxv_db.get_nsxv_router_binding(
        context.session, router_id)
    if binding:
        edge_id = binding['edge_id']
        nsxv_manager.update_firewall(edge_id, firewall, context,
                                     allow_external=allow_external)
    else:
        LOG.warning("Bindings do not exists for %s", router_id)


def check_network_in_use_at_backend(context, network_id):
    """Poll until the network has no edge vnic bindings left.

    Retries up to cfg.CONF.nsxv.retries times with exponential backoff
    (0.5s doubling, capped at 60s between attempts).
    """
    retries = max(cfg.CONF.nsxv.retries, 1)
    delay = 0.5
    for attempt in range(1, retries + 1):
        if attempt != 1:
            time.sleep(delay)
            delay = min(2 * delay, 60)
        edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_int_lswitch(
            context.session, network_id)
        if not edge_vnic_bindings:
            # (tail of a backend-network-in-use check whose definition begins
            # above this chunk — left untouched)
            return
        LOG.warning('NSXv: network is still in use at the backend')
    LOG.error('NSXv: network is still in use at the backend')


def default_loglevel_modifier(config, level):
    """Modify log level settings in edge config bulk (standard syntax)

    Returns True when the config dict was modified (so the caller should
    PUT it back), False when the expected 'logging' section is missing.
    """
    if 'logging' not in config:
        LOG.error("Logging section missing in configuration")
        return False

    enable = True
    if level == 'none':
        # 'none' disables logging entirely; the stored level falls back
        # to the backend default.
        enable = False
        level = 'info'  # default

    config['logging']['enable'] = enable
    config['logging']['logLevel'] = level
    return True


def routing_loglevel_modifier(config, level):
    """Modify log level in routing global settings

    The routing module keeps its logging block nested under
    'routingGlobalConfig' instead of at the top level.
    """
    if 'routingGlobalConfig' not in config:
        LOG.error("routingGlobalConfig section missing in config")
        return False

    return default_loglevel_modifier(config['routingGlobalConfig'],
                                     level)


def get_loglevel_modifier(module, level):
    """Pick modifier according to module and set log level

    Returns a single-argument callable suitable for
    vcns.update_edge_config_with_modifier(); `level` is captured by
    closure.
    """
    special_modifiers = {'routing': routing_loglevel_modifier}

    modifier = default_loglevel_modifier
    if module in special_modifiers.keys():
        modifier = special_modifiers[module]

    def wrapper(config):
        return modifier(config, level)

    return wrapper


def update_edge_loglevel(vcns, edge_id, module, level):
    """Update loglevel on edge for specified module

    Silently ignores (with an error log) unknown modules or levels
    rather than raising.
    """
    if module not in SUPPORTED_EDGE_LOG_MODULES:
        LOG.error("Unrecognized logging module %s - ignored", module)
        return

    if level not in SUPPORTED_EDGE_LOG_LEVELS:
        LOG.error("Unrecognized log level %s - ignored", level)
        return

    vcns.update_edge_config_with_modifier(edge_id, module,
                                          get_loglevel_modifier(module,
                                                                level))


def update_edge_host_groups(vcns, edge_id, dvs, availability_zone,
                            validate=False):
    """Configure DRS host groups for the appliances of an HA edge.

    When `validate` is set, skip reconfiguration if every appliance VM is
    already a member of the configured host groups.
    """
    # Update edge DRS host groups
    h, appliances = vcns.get_edge_appliances(edge_id)
    vms = [appliance['vmId']
           for appliance in appliances['appliances']]
    if validate:
        configured_vms = dvs.get_configured_vms(
            availability_zone.resource_pool,
            len(availability_zone.edge_host_groups))
        for vm in vms:
            if vm in configured_vms:
                LOG.info('Edge %s already configured', edge_id)
                return
    LOG.info('Create DRS groups for %(vms)s on edge %(edge_id)s',
             {'vms': vms,
              'edge_id': edge_id})
    # Ensure random distribution of the VMs
    if availability_zone.ha_placement_random:
        if len(vms) < len(availability_zone.edge_host_groups):
            # add some empty vms to the list, so it will randomize between
            # all host groups
            vms.extend([None] * (len(availability_zone.edge_host_groups) -
                                 len(vms)))
        random.shuffle(vms)
    try:
        dvs.update_cluster_edge_failover(
            availability_zone.resource_pool,
            vms, availability_zone.edge_host_groups)
    except Exception as e:
        # Best effort: a DRS failure must not fail edge deployment.
        LOG.error('Unable to create DRS groups for '
                  '%(vms)s on edge %(edge_id)s. Error: %(e)s',
                  {'vms': vms,
                   'edge_id': edge_id,
                   'e': e})


def clean_host_groups(dvs, availability_zone):
    """Remove the DRS host groups created for an availability zone."""
    try:
        LOG.info('Cleaning up host groups for AZ %s',
                 availability_zone.name)
        dvs.cluster_host_group_cleanup(
            availability_zone.resource_pool,
            len(availability_zone.edge_host_groups))
    except Exception as e:
        LOG.error('Unable to cleanup. Error: %s', e)


class NsxVCallbacks(object):
    """Edge callback implementation

    Callback functions for asynchronous tasks.
    """
    def __init__(self, plugin):
        self.plugin = plugin
        if cfg.CONF.nsxv.use_dvs_features:
            self._vcm = dvs.VCManager()
        else:
            # DVS features disabled: no vCenter manager, host-group
            # placement is skipped in complete_edge_creation().
            self._vcm = None

    def complete_edge_creation(self, context, edge_id, name, router_id, dist,
                               deploy_successful, availability_zone=None,
                               deploy_metadata=False):
        """Post-deploy callback: finalize DB state after an edge deploy.

        Marks the router binding ACTIVE/ERROR, optionally wires up the
        metadata proxy, and (for HA, non-distributed edges) configures
        DRS host groups.
        """
        router_db = None
        if uuidutils.is_uuid_like(router_id):
            try:
                router_db = self.plugin._get_router(context, router_id)
            except l3_exc.RouterNotFound:
                # Router might have been deleted before deploy finished
                LOG.warning("Router %s not found", name)

        if deploy_successful:
            metadata_proxy_handler = self.plugin.get_metadata_proxy_handler(
                availability_zone.name)
            if deploy_metadata and metadata_proxy_handler:
                LOG.debug('Update metadata for resource %s', router_id)
                metadata_proxy_handler.configure_router_edge(
                    context, router_id)

                self.plugin.setup_dhcp_edge_fw_rules(context, self.plugin,
                                                     router_id)
            LOG.debug("Successfully deployed %(edge_id)s for router %(name)s",
                      {'edge_id': edge_id, 'name': name})
            if (router_db and
                    router_db['status'] == constants.PENDING_CREATE):
                router_db['status'] = constants.ACTIVE
            nsxv_db.update_nsxv_router_binding(
                context.session, router_id,
                status=constants.ACTIVE)
            if (not dist and self._vcm and availability_zone and
                availability_zone.edge_ha and
                availability_zone.edge_host_groups):
                with locking.LockManager.get_lock('nsx-vc-drs-update'):
                    update_edge_host_groups(self.plugin.nsx_v.vcns, edge_id,
                                            self._vcm, availability_zone,
                                            validate=True)
        else:
            LOG.error("Failed to deploy Edge for router %s", name)
            if router_db:
                router_db['status'] = constants.ERROR
            nsxv_db.update_nsxv_router_binding(
                context.session, router_id,
                status=constants.ERROR)
            if not dist and edge_id:
                nsxv_db.clean_edge_vnic_binding(
                    context.session, edge_id)

    def complete_edge_update(
            self, context, edge_id, router_id, successful, set_errors):
        """Post-update callback: record the result of an edge update.

        On failure the router binding (and optionally the router itself,
        when `set_errors` is true) is flipped to ERROR.
        """
        if successful:
            LOG.debug("Successfully updated %(edge_id)s for router "
                      "%(router_id)s",
                      {'edge_id': edge_id,
                       'router_id': router_id})
        else:
            LOG.error("Failed to update %(edge_id)s for router "
                      "%(router_id)s",
                      {'edge_id': edge_id,
                       'router_id': router_id})
            admin_ctx = q_context.get_admin_context()
            if nsxv_db.get_nsxv_router_binding(admin_ctx.session, router_id):
                nsxv_db.update_nsxv_router_binding(
                    admin_ctx.session, router_id,
                    status=constants.ERROR)
            if set_errors and context:
                # Set the router status to ERROR
                try:
                    router_db = self.plugin._get_router(context, router_id)
                    router_db['status'] = constants.ERROR
                except l3_exc.RouterNotFound:
                    # Router might have been deleted before deploy finished
                    LOG.warning("Router %s not found", router_id)

    def interface_update_result(self, task):
        # Task-framework hook: only logs the final task status.
        LOG.debug("interface_update_result %d", task.status)
vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/edge_ipsecvpn_driver.py0000666000175100017510000001467513244523345027147 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import excutils

from vmware_nsx._i18n import _
from vmware_nsx.plugins.nsx_v.vshield.common import (
    exceptions as vcns_exc)

LOG = logging.getLogger(__name__)

# Mapping from Neutron IPsec encryption algorithm names to the
# corresponding VSE (vShield Edge) identifiers.
ENCRYPTION_ALGORITHM_MAP = {
    '3des': '3des',
    'aes-128': 'aes',
    'aes-256': 'aes256'
}

# Mapping from Neutron PFS group names to VSE Diffie-Hellman group names.
PFS_MAP = {
    'group2': 'dh2',
    'group5': 'dh5'}

# Only ESP transform protocol and tunnel mode are configurable on VSE.
TRANSFORM_PROTOCOL_ALLOWED = ('esp',)

ENCAPSULATION_MODE_ALLOWED = ('tunnel',)


class EdgeIPsecVpnDriver(object):

    """Driver APIs for Edge IPsec VPN bulk configuration."""

    def _check_ikepolicy_ipsecpolicy_allowed(self, ikepolicy, ipsecpolicy):
        """Check whether ikepolicy and ipsecpolicy are allowed on vshield edge.

        Some IPsec VPN configurations and features are configured by default
        or not supported on vshield edge.
        """
        # Check validation of IKEPolicy.
        if ikepolicy['ike_version'] != 'v1':
            msg = _("Unsupported ike_version: %s! Only 'v1' ike version is "
                    "supported on vshield Edge!"
                    ) % ikepolicy['ike_version']
            LOG.warning(msg)
            raise vcns_exc.VcnsBadRequest(resource='ikepolicy',
                                          msg=msg)

        # In VSE, Phase 1 and Phase 2 share the same encryption_algorithm
        # and authentication algorithms setting. At present, just record the
        # discrepancy error in log and take ipsecpolicy to do configuration.
        if (ikepolicy['auth_algorithm'] != ipsecpolicy['auth_algorithm'] or
            ikepolicy['encryption_algorithm'] != ipsecpolicy[
                'encryption_algorithm'] or
            ikepolicy['pfs'] != ipsecpolicy['pfs']):
            LOG.warning(
                "IKEPolicy and IPsecPolicy should have consistent "
                "auth_algorithm, encryption_algorithm and pfs for VSE!")

        # Check whether encryption_algorithm is allowed.
        encryption_algorithm = ENCRYPTION_ALGORITHM_MAP.get(
            ipsecpolicy.get('encryption_algorithm'), None)
        if not encryption_algorithm:
            msg = _("Unsupported encryption_algorithm: %s! '3des', "
                    "'aes-128' and 'aes-256' are supported on VSE right now."
                    ) % ipsecpolicy['encryption_algorithm']
            LOG.warning(msg)
            raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy',
                                          msg=msg)

        # Check whether pfs is allowed.
        if not PFS_MAP.get(ipsecpolicy['pfs']):
            msg = _("Unsupported pfs: %s! 'group2' and 'group5' "
                    "are supported on VSE right now.") % ipsecpolicy['pfs']
            LOG.warning(msg)
            raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy',
                                          msg=msg)

        # Check whether transform protocol is allowed.
        if ipsecpolicy['transform_protocol'] not in TRANSFORM_PROTOCOL_ALLOWED:
            msg = _("Unsupported transform protocol: %s! 'esp' is supported "
                    "by default on VSE right now."
                    ) % ipsecpolicy['transform_protocol']
            LOG.warning(msg)
            raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy',
                                          msg=msg)

        # Check whether encapsulation mode is allowed.
        if ipsecpolicy['encapsulation_mode'] not in ENCAPSULATION_MODE_ALLOWED:
            msg = _("Unsupported encapsulation mode: %s! 'tunnel' is "
                    "supported by default on VSE right now."
                    ) % ipsecpolicy['encapsulation_mode']
            LOG.warning(msg)
            raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy',
                                          msg=msg)

    def _convert_ipsec_site(self, site, enablePfs=True):
        """Translate a Neutron vpn-site dict into the VSE site schema.

        Raises VcnsBadRequest (via the policy check) if the site's
        IKE/IPsec policies use features VSE cannot configure.
        """
        self._check_ikepolicy_ipsecpolicy_allowed(
            site['ikepolicy'], site['ipsecpolicy'])
        return {
            'enabled': site['site'].get('admin_state_up'),
            'enablePfs': enablePfs,
            'dhGroup': PFS_MAP.get(site['ipsecpolicy']['pfs']),
            'name': site['site'].get('name'),
            'description': site['site'].get('description'),
            'localId': site['external_ip'],
            'localIp': site['external_ip'],
            'peerId': site['site'].get('peer_id'),
            'peerIp': site['site'].get('peer_address'),
            'localSubnets': {
                'subnets': [site['subnet'].get('cidr')]},
            'peerSubnets': {
                'subnets': site['site'].get('peer_cidrs')},
            'authenticationMode': site['site'].get('auth_mode'),
            'psk': site['site'].get('psk'),
            'encryptionAlgorithm': ENCRYPTION_ALGORITHM_MAP.get(
                site['ipsecpolicy'].get('encryption_algorithm'))}

    def update_ipsec_config(self, edge_id, sites, enabled=True):
        """Replace the whole IPsec VPN configuration of an edge.

        All sites are pushed in one bulk PUT; API errors are logged and
        re-raised.
        """
        ipsec_config = {'featureType': "ipsec_4.0",
                        'enabled': enabled}
        vse_sites = [self._convert_ipsec_site(site) for site in sites]
        ipsec_config['sites'] = {'sites': vse_sites}
        try:
            self.vcns.update_ipsec_config(edge_id, ipsec_config)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception("Failed to update ipsec vpn "
                              "configuration with edge_id: %s",
                              edge_id)

    def delete_ipsec_config(self, edge_id):
        """Delete the edge IPsec config; a missing config is only a warning."""
        try:
            self.vcns.delete_ipsec_config(edge_id)
        except vcns_exc.ResourceNotFound:
            LOG.warning("IPsec config not found on edge: %s", edge_id)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception("Failed to delete ipsec vpn configuration "
                              "with edge_id: %s", edge_id)

    def get_ipsec_config(self, edge_id):
        # Thin pass-through to the backend client.
        return self.vcns.get_ipsec_config(edge_id)
vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/vcns_driver.py0000666000175100017510000000461713244523345025306 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.plugins.nsx_v.vshield import edge_appliance_driver from vmware_nsx.plugins.nsx_v.vshield import edge_dynamic_routing_driver from vmware_nsx.plugins.nsx_v.vshield import edge_firewall_driver from vmware_nsx.plugins.nsx_v.vshield.tasks import tasks from vmware_nsx.plugins.nsx_v.vshield import vcns from vmware_nsx.services.lbaas.nsx_v.v2 import ( edge_loadbalancer_driver_v2 as lbaas_v2) LOG = logging.getLogger(__name__) class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver, lbaas_v2.EdgeLoadbalancerDriverV2, edge_firewall_driver.EdgeFirewallDriver, edge_dynamic_routing_driver.EdgeDynamicRoutingDriver): def __init__(self, callbacks): super(VcnsDriver, self).__init__() self.callbacks = callbacks self.vcns_uri = cfg.CONF.nsxv.manager_uri self.vcns_user = cfg.CONF.nsxv.user self.vcns_passwd = cfg.CONF.nsxv.password self.ca_file = cfg.CONF.nsxv.ca_file self.insecure = cfg.CONF.nsxv.insecure self.deployment_container_id = cfg.CONF.nsxv.deployment_container_id self._pid = None self._task_manager = None self.vcns = vcns.Vcns(self.vcns_uri, self.vcns_user, self.vcns_passwd, self.ca_file, self.insecure) @property def task_manager(self): if (self._task_manager is None or self._pid != os.getpid()): LOG.debug("Creating task manager") self._pid = os.getpid() interval = cfg.CONF.nsxv.task_status_check_interval self._task_manager = tasks.TaskManager(interval) LOG.debug("Starting task manager") self._task_manager.start() return self._task_manager vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/__init__.py0000666000175100017510000000000013244523345024501 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/edge_dynamic_routing_driver.py0000666000175100017510000002151413244523345030511 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with 
# the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

from oslo_config import cfg
from oslo_log import log as logging

from vmware_nsx.common import locking

LOG = logging.getLogger(__name__)


class EdgeDynamicRoutingDriver(object):

    """Edge driver API to implement the dynamic routing"""

    def __init__(self):
        # it will be initialized at subclass
        self.vcns = None
        self.ecmp_wait_time = cfg.CONF.nsxv.ecmp_wait_time

    def _prepare_bgp_config(self, bgp_config):
        """Normalize a backend BGP config dict in place.

        The NSX-v GET representation nests lists under plural wrapper keys;
        this rewraps neighbours, their filters and redistribution rules into
        the list-of-single-key-dict shape the PUT API expects.
        """
        bgp_config.setdefault('enabled', False)
        bgp_config.setdefault('bgpNeighbours', {'bgpNeighbours': []})
        bgp_config.setdefault('redistribution', {'rules': {'rules': []}})

        curr_neighbours = [{'bgpNeighbour': nbr} for nbr in
                           bgp_config['bgpNeighbours']['bgpNeighbours']]
        bgp_config['bgpNeighbours'] = curr_neighbours
        for nbr in curr_neighbours:
            bgp_filters = [{'bgpFilter': bf} for bf in
                           nbr['bgpNeighbour']['bgpFilters']['bgpFilters']]
            nbr['bgpNeighbour']['bgpFilters'] = bgp_filters
        redistribution_rules = [{'rule': rule} for rule in
                                bgp_config['redistribution']['rules']['rules']]
        bgp_config['redistribution']['rules'] = redistribution_rules

    def _get_routing_config(self, edge_id):
        """Fetch and normalize the edge global routing configuration.

        Returns a dict shaped for update_edge_routing_config().
        """
        h, config = self.vcns.get_edge_routing_config(edge_id)
        # Backend complains when adding this in the request.
        config.pop('featureType')
        config.pop('ospf')

        global_config = config['routingGlobalConfig']
        bgp_config = config.get('bgp', {})

        self._prepare_bgp_config(bgp_config)

        global_config.setdefault('ipPrefixes', {'ipPrefixes': []})
        curr_prefixes = [{'ipPrefix': prx} for prx in
                         global_config['ipPrefixes']['ipPrefixes']]
        global_config['ipPrefixes'] = curr_prefixes

        # Don't change any static routes.
        static_routing = config.get('staticRouting', {})
        static_routes = static_routing.get('staticRoutes', {})
        current_routes = [{'route': route} for route in
                          static_routes.get('staticRoutes', [])]
        static_routing['staticRoutes'] = current_routes

        return {'routing': config}

    def _update_routing_config(self, edge_id, **kwargs):
        """Read-modify-write of the global routing config (router id,
        ip prefixes); always enables ECMP."""
        routing_config = self._get_routing_config(edge_id)
        global_config = routing_config['routing']['routingGlobalConfig']
        current_prefixes = global_config['ipPrefixes']

        global_config['ecmp'] = True

        if 'router_id' in kwargs:
            global_config['routerId'] = kwargs['router_id']

        current_prefixes[:] = [p for p in current_prefixes
                               if p['ipPrefix']['name'] not in
                               kwargs.get('prefixes_to_remove', [])]
        # Avoid adding duplicate rules when shared router relocation
        current_prefixes.extend([p for p in kwargs.get('prefixes_to_add', [])
                                 if p not in current_prefixes])

        self.vcns.update_edge_routing_config(edge_id, routing_config)

    def _reset_routing_global_config(self, edge_id):
        """Drop BGP-related global routing state (ECMP, router id,
        prefixes) after the BGP speaker is removed."""
        routing_config = self._get_routing_config(edge_id)
        global_config = routing_config['routing']['routingGlobalConfig']
        global_config['ecmp'] = False
        global_config.pop('routerId')
        global_config.pop('ipPrefixes')
        self.vcns.update_edge_routing_config(edge_id, routing_config)

    def get_routing_bgp_config(self, edge_id):
        """Return the normalized BGP section of the edge routing config."""
        h, config = self.vcns.get_bgp_routing_config(edge_id)
        bgp_config = config if config else {}
        self._prepare_bgp_config(bgp_config)
        return {'bgp': bgp_config}

    def _update_bgp_routing_config(self, edge_id, **kwargs):
        """Read-modify-write of the BGP config: neighbours, redistribution
        rules, local AS, default-originate. Always enables BGP."""
        bgp_config = self.get_routing_bgp_config(edge_id)
        curr_neighbours = bgp_config['bgp']['bgpNeighbours']
        curr_rules = bgp_config['bgp']['redistribution']['rules']

        bgp_config['bgp']['enabled'] = True

        if 'default_originate' in kwargs:
            bgp_config['bgp']['defaultOriginate'] = kwargs['default_originate']

        if 'local_as' in kwargs:
            bgp_config['bgp']['localAS'] = kwargs['local_as']

        if 'enabled' in kwargs:
            bgp_config['bgp']['redistribution']['enabled'] = kwargs['enabled']

        curr_rules[:] = [rule for rule in curr_rules
                         if rule['rule'].get('prefixName') not in
                         kwargs.get('rules_to_remove', [])]
        # Avoid adding duplicate rules when shared router relocation
        curr_rules_prefixes = [r['rule'].get('prefixName')
                               for r in curr_rules]
        curr_rules.extend([r for r in kwargs.get('rules_to_add', [])
                           if r['rule'].get('prefixName') not in
                           curr_rules_prefixes])

        neighbours_to_remove = [nbr['bgpNeighbour']['ipAddress'] for nbr in
                                kwargs.get('neighbours_to_remove', [])]
        curr_neighbours[:] = [nbr for nbr in curr_neighbours
                              if nbr['bgpNeighbour']['ipAddress']
                              not in neighbours_to_remove]
        curr_neighbours.extend(kwargs.get('neighbours_to_add', []))

        self.vcns.update_bgp_dynamic_routing(edge_id, bgp_config)

    def add_bgp_speaker_config(self, edge_id, prot_router_id, local_as,
                               enabled, bgp_neighbours, prefixes,
                               redistribution_rules,
                               default_originate=False):
        """Enable a BGP speaker on the edge: set global routing config first,
        then push the BGP config. Serialized per edge via lock."""
        with locking.LockManager.get_lock(str(edge_id)):
            self._update_routing_config(edge_id,
                                        router_id=prot_router_id,
                                        prefixes_to_add=prefixes)
            # Optional settle delay after enabling ECMP before touching BGP.
            if self.ecmp_wait_time > 0:
                time.sleep(self.ecmp_wait_time)
            self._update_bgp_routing_config(
                edge_id, enabled=enabled, local_as=local_as,
                neighbours_to_add=bgp_neighbours,
                prefixes_to_add=prefixes,
                rules_to_add=redistribution_rules,
                default_originate=default_originate)

    def delete_bgp_speaker_config(self, edge_id):
        """Remove BGP config entirely and reset the global routing state."""
        with locking.LockManager.get_lock(str(edge_id)):
            self.vcns.delete_bgp_routing_config(edge_id)
            self._reset_routing_global_config(edge_id)

    def add_bgp_neighbours(self, edge_id, bgp_neighbours):
        # Query the bgp config first and update the bgpNeighbour
        with locking.LockManager.get_lock(str(edge_id)):
            self._update_bgp_routing_config(edge_id,
                                            neighbours_to_add=bgp_neighbours)

    def remove_bgp_neighbours(self, edge_id, bgp_neighbours):
        with locking.LockManager.get_lock(str(edge_id)):
            self._update_bgp_routing_config(
                edge_id, neighbours_to_remove=bgp_neighbours)

    def update_bgp_neighbours(self, edge_id, neighbours_to_add=None,
                              neighbours_to_remove=None):
        with locking.LockManager.get_lock(str(edge_id)):
            self._update_bgp_routing_config(
                edge_id,
                neighbours_to_add=neighbours_to_add,
                neighbours_to_remove=neighbours_to_remove)

    def update_routing_redistribution(self, edge_id, enabled):
        with locking.LockManager.get_lock(str(edge_id)):
            self._update_bgp_routing_config(edge_id, enabled=enabled)

    def add_bgp_redistribution_rules(self, edge_id, prefixes, rules):
        """Add ip prefixes first, then the redistribution rules that
        reference them."""
        with locking.LockManager.get_lock(str(edge_id)):
            self._update_routing_config(edge_id,
                                        prefixes_to_add=prefixes)
            self._update_bgp_routing_config(edge_id,
                                            rules_to_add=rules)
        LOG.debug("Added redistribution rules %s on edge %s",
                  rules, edge_id)

    def remove_bgp_redistribution_rules(self, edge_id, prefixes):
        """Remove redistribution rules first, then the now-unreferenced
        ip prefixes (reverse order of add)."""
        with locking.LockManager.get_lock(str(edge_id)):
            self._update_bgp_routing_config(edge_id,
                                            rules_to_remove=prefixes)
            self._update_routing_config(edge_id,
                                        prefixes_to_remove=prefixes)
        LOG.debug("Removed redistribution rules for prefixes %s on edge %s",
                  prefixes, edge_id)

    def update_router_id(self, edge_id, router_id):
        with locking.LockManager.get_lock(str(edge_id)):
            self._update_routing_config(edge_id, router_id=router_id)
vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/vcns.py0000666000175100017510000013763213244523345023735 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time import xml.etree.ElementTree as et from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import strutils import six from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils from vmware_nsx.plugins.nsx_v.vshield.common import constants from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.plugins.nsx_v.vshield.common import VcnsApiClient LOG = logging.getLogger(__name__) HTTP_GET = "GET" HTTP_POST = "POST" HTTP_DELETE = "DELETE" HTTP_PUT = "PUT" URI_PREFIX = "/api/4.0/edges" #FwaaS constants FIREWALL_SERVICE = "firewall/config" FIREWALL_RULE_RESOURCE = "rules" #NSXv Constants FIREWALL_PREFIX = '/api/4.0/firewall/globalroot-0/config' FIREWALL_REDIRECT_SEC_TYPE = 'layer3redirectsections' SECURITYGROUP_PREFIX = '/api/2.0/services/securitygroup' VDN_PREFIX = '/api/2.0/vdn' SERVICES_PREFIX = '/api/2.0/services' SPOOFGUARD_PREFIX = '/api/4.0/services/spoofguard' TRUSTSTORE_PREFIX = '%s/%s' % (SERVICES_PREFIX, 'truststore') EXCLUDELIST_PREFIX = '/api/2.1/app/excludelist' SERVICE_INSERTION_PROFILE_PREFIX = '/api/2.0/si/serviceprofile' SECURITY_POLICY_PREFIX = '/api/2.0/services/policy/securitypolicy' APPLICATION_PREFIX = '%s/%s' % (SERVICES_PREFIX, 'application') #LbaaS Constants LOADBALANCER_SERVICE = "loadbalancer/config" LOADBALANCER_STATS = "loadbalancer/statistics" VIP_RESOURCE = "virtualservers" POOL_RESOURCE = "pools" MONITOR_RESOURCE = "monitors" APP_PROFILE_RESOURCE = "applicationprofiles" APP_RULE_RESOURCE = "applicationrules" # IPsec 
VPNaaS Constants IPSEC_VPN_SERVICE = 'ipsec/config' # Dhcp constants DHCP_SERVICE = "dhcp/config" DHCP_BINDING_RESOURCE = "bindings" # Syetem control constants SYSCTL_SERVICE = 'systemcontrol/config' # L2 gateway constants BRIDGE = "bridging/config" # IPAM constants IPAM_POOL_SCOPE = "scope/globalroot-0" IPAM_POOL_SERVICE = "ipam/pools" # Self Signed Certificate constants CSR = "csr" CERTIFICATE = "certificate" NETWORK_TYPES = ['Network', 'VirtualWire', 'DistributedVirtualPortgroup'] # Dynamic routing constants ROUTING_CONFIG = "routing/config" BGP_ROUTING_CONFIG = "routing/config/bgp" ELAPSED_TIME_THRESHOLD = 30 MAX_EDGE_DEPLOY_TIMEOUT = 1200 def retry_upon_exception_exclude_error_codes( exc, excluded_errors, delay=0.5, max_delay=4, max_attempts=0): if not max_attempts: max_attempts = cfg.CONF.nsxv.retries return utils.retry_upon_exception_exclude_error_codes( exc, excluded_errors, delay, max_delay, max_attempts) def retry_upon_exception(exc, delay=0.5, max_delay=4, max_attempts=0): if not max_attempts: max_attempts = cfg.CONF.nsxv.retries return utils.retry_upon_exception(exc, delay, max_delay, max_attempts) class Vcns(object): def __init__(self, address, user, password, ca_file, insecure): self.address = address self.user = user self.password = password self.ca_file = ca_file self.insecure = insecure self.jsonapi_client = VcnsApiClient.VcnsApiHelper( address, user, password, format='json', ca_file=ca_file, insecure=insecure, timeout=cfg.CONF.nsxv.nsx_transaction_timeout) self.xmlapi_client = VcnsApiClient.VcnsApiHelper( address, user, password, format='xml', ca_file=ca_file, insecure=insecure, timeout=cfg.CONF.nsxv.nsx_transaction_timeout) self._nsx_version = None self._normalized_scoping_objects = None self._normalized_global_objects = None @retry_upon_exception(exceptions.ServiceConflict) def _client_request(self, client, method, uri, params, headers, encodeParams, timeout=None): return client(method, uri, params, headers, encodeParams, timeout=timeout) def 
do_request(self, method, uri, params=None, format='json', **kwargs): msg = ("VcnsApiHelper('%(method)s', '%(uri)s', '%(body)s')" % {'method': method, 'uri': uri, 'body': jsonutils.dumps(params)}) LOG.debug(strutils.mask_password(msg)) headers = kwargs.get('headers') encodeParams = kwargs.get('encode', True) if format == 'json': _client = self.jsonapi_client.request else: _client = self.xmlapi_client.request timeout = kwargs.get('timeout') ts = time.time() header, content = self._client_request(_client, method, uri, params, headers, encodeParams, timeout=timeout) te = time.time() elapsed_time = te - ts LOG.debug('VcnsApiHelper for %(method)s %(uri)s took %(seconds)2.4f. ' 'reply: header=%(header)s content=%(content)s', {'method': method, 'uri': uri, 'header': header, 'content': content, 'seconds': elapsed_time}) if elapsed_time > ELAPSED_TIME_THRESHOLD: LOG.warning('Vcns call for %(method)s %(uri)s took %(seconds)2.4f', {'method': method, 'uri': uri, 'seconds': elapsed_time}) if content == '': return header, {} if kwargs.get('decode', True): content = jsonutils.loads(content) return header, content def edges_lock_operation(self): uri = URI_PREFIX + "?lockUpdatesOnEdge=true" return self.do_request(HTTP_POST, uri, decode=False) @retry_upon_exception(exceptions.ResourceNotFound) @retry_upon_exception(exceptions.RequestBad) def deploy_edge(self, request): uri = URI_PREFIX return self.do_request(HTTP_POST, uri, request, decode=False, timeout=MAX_EDGE_DEPLOY_TIMEOUT) def update_edge(self, edge_id, request): uri = "%s/%s" % (URI_PREFIX, edge_id) return self.do_request(HTTP_PUT, uri, request, decode=False) def get_edge_id(self, job_id): uri = URI_PREFIX + "/jobs/%s" % job_id return self.do_request(HTTP_GET, uri, decode=True) def get_edge_jobs(self, edge_id): uri = URI_PREFIX + "/%s/jobs" % edge_id return self.do_request(HTTP_GET, uri, decode=True) def get_edge_deploy_status(self, edge_id): uri = URI_PREFIX + "/%s/status?getlatest=false" % edge_id return 
self.do_request(HTTP_GET, uri, decode="True") def delete_edge(self, edge_id): uri = "%s/%s" % (URI_PREFIX, edge_id) return self.do_request(HTTP_DELETE, uri) def add_vdr_internal_interface(self, edge_id, interface): uri = "%s/%s/interfaces?action=patch" % (URI_PREFIX, edge_id) return self.do_request(HTTP_POST, uri, interface, decode=True) def get_vdr_internal_interface(self, edge_id, interface_index): uri = "%s/%s/interfaces/%s" % (URI_PREFIX, edge_id, interface_index) return self.do_request(HTTP_GET, uri, decode=True) def update_vdr_internal_interface(self, edge_id, interface_index, interface): uri = "%s/%s/interfaces/%s" % (URI_PREFIX, edge_id, interface_index) return self.do_request(HTTP_PUT, uri, interface, format='xml', decode=True) @retry_upon_exception(exceptions.RequestBad) def delete_vdr_internal_interface(self, edge_id, interface_index): uri = "%s/%s/interfaces/%d" % (URI_PREFIX, edge_id, interface_index) return self.do_request(HTTP_DELETE, uri, decode=True) def get_interfaces(self, edge_id): uri = "%s/%s/vnics" % (URI_PREFIX, edge_id) return self.do_request(HTTP_GET, uri, decode=True) @retry_upon_exception(exceptions.RequestBad) def update_interface(self, edge_id, vnic): uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic['index']) return self.do_request(HTTP_PUT, uri, vnic, decode=True) def delete_interface(self, edge_id, vnic_index): uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic_index) return self.do_request(HTTP_DELETE, uri, decode=True) def get_nat_config(self, edge_id): uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id) return self.do_request(HTTP_GET, uri, decode=True) def update_nat_config(self, edge_id, nat): uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id) return self.do_request(HTTP_PUT, uri, nat, decode=True) def delete_nat_rule(self, edge_id, rule_id): uri = "%s/%s/nat/config/rules/%s" % (URI_PREFIX, edge_id, rule_id) return self.do_request(HTTP_DELETE, uri, decode=True) def get_edge_status(self, edge_id): uri = 
"%s/%s/status?getlatest=false" % (URI_PREFIX, edge_id) return self.do_request(HTTP_GET, uri, decode=True) def get_edge(self, edge_id): uri = "%s/%s" % (URI_PREFIX, edge_id) return self.do_request(HTTP_GET, uri, decode=True) def _get_edges(self, startindex=0): uri = '%s?startIndex=%d' % (URI_PREFIX, startindex) return self.do_request(HTTP_GET, uri, decode=True) def get_edges(self): edges = [] h, d = self._get_edges() edges.extend(d['edgePage']['data']) paging_info = d['edgePage']['pagingInfo'] page_size = int(paging_info['pageSize']) count = int(paging_info['totalCount']) LOG.debug("There are total %s edges and page size is %s", count, page_size) pages = count / page_size + 1 for i in range(1, pages): start_index = page_size * i h, d = self._get_edges(start_index) edges.extend(d['edgePage']['data']) return edges def get_edge_syslog(self, edge_id): uri = "%s/%s/syslog/config" % (URI_PREFIX, edge_id) return self.do_request(HTTP_GET, uri, decode=True) def update_edge_syslog(self, edge_id, config): uri = "%s/%s/syslog/config" % (URI_PREFIX, edge_id) return self.do_request(HTTP_PUT, uri, config) def delete_edge_syslog(self, edge_id): uri = "%s/%s/syslog/config" % (URI_PREFIX, edge_id) return self.do_request(HTTP_DELETE, uri) def update_edge_config_with_modifier(self, edge_id, module, modifier): uri = "%s/%s/%s/config" % (URI_PREFIX, edge_id, module) config = self.do_request(HTTP_GET, uri)[1] if modifier(config): return self.do_request(HTTP_PUT, uri, config) def get_edge_interfaces(self, edge_id): uri = "%s/%s/interfaces" % (URI_PREFIX, edge_id) return self.do_request(HTTP_GET, uri, decode=True) def get_routes(self, edge_id): uri = "%s/%s/routing/config/static" % (URI_PREFIX, edge_id) return self.do_request(HTTP_GET, uri) def update_routes(self, edge_id, routes): uri = "%s/%s/routing/config/static" % (URI_PREFIX, edge_id) return self.do_request(HTTP_PUT, uri, routes) def create_lswitch(self, lsconfig): uri = "/api/ws.v1/lswitch" return self.do_request(HTTP_POST, uri, 
lsconfig, decode=True) def delete_lswitch(self, lswitch_id): uri = "/api/ws.v1/lswitch/%s" % lswitch_id return self.do_request(HTTP_DELETE, uri) def get_loadbalancer_config(self, edge_id): uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE) return self.do_request(HTTP_GET, uri, decode=True) def get_loadbalancer_statistics(self, edge_id): uri = self._build_uri_path(edge_id, LOADBALANCER_STATS) return self.do_request(HTTP_GET, uri, decode=True) def enable_service_loadbalancer(self, edge_id, config): uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE) return self.do_request(HTTP_PUT, uri, config) def sync_firewall(self): for cluster_id in cfg.CONF.nsxv.cluster_moid: uri = '/api/4.0/firewall/forceSync/%s' % cluster_id self.do_request(HTTP_POST, uri) def update_firewall(self, edge_id, fw_req): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE) return self.do_request(HTTP_PUT, uri, fw_req) def delete_firewall(self, edge_id): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, None) return self.do_request(HTTP_DELETE, uri) def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, FIREWALL_RULE_RESOURCE, vcns_rule_id) return self.do_request(HTTP_PUT, uri, fwr_req) def delete_firewall_rule(self, edge_id, vcns_rule_id): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, FIREWALL_RULE_RESOURCE, vcns_rule_id) return self.do_request(HTTP_DELETE, uri) def add_firewall_rule_above(self, edge_id, ref_vcns_rule_id, fwr_req): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, FIREWALL_RULE_RESOURCE) uri += "?aboveRuleId=" + ref_vcns_rule_id return self.do_request(HTTP_POST, uri, fwr_req) def add_firewall_rule(self, edge_id, fwr_req): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, FIREWALL_RULE_RESOURCE) return self.do_request(HTTP_POST, uri, fwr_req) def update_firewall_default_policy(self, edge_id, fw_req): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, 'defaultpolicy') return 
self.do_request(HTTP_PUT, uri, fw_req) def get_firewall(self, edge_id): uri = self._build_uri_path(edge_id, FIREWALL_SERVICE) return self.do_request(HTTP_GET, uri, decode=True) def get_firewall_rule(self, edge_id, vcns_rule_id): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, FIREWALL_RULE_RESOURCE, vcns_rule_id) return self.do_request(HTTP_GET, uri, decode=True) # #Edge LBAAS call helper # def create_vip(self, edge_id, vip_new): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, VIP_RESOURCE) return self.do_request(HTTP_POST, uri, vip_new) def get_vip(self, edge_id, vip_vseid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, VIP_RESOURCE, vip_vseid) return self.do_request(HTTP_GET, uri, decode=True) def update_vip(self, edge_id, vip_vseid, vip_new): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, VIP_RESOURCE, vip_vseid) return self.do_request(HTTP_PUT, uri, vip_new) def delete_vip(self, edge_id, vip_vseid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, VIP_RESOURCE, vip_vseid) return self.do_request(HTTP_DELETE, uri) def create_pool(self, edge_id, pool_new): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, POOL_RESOURCE) return self.do_request(HTTP_POST, uri, pool_new) def get_pool(self, edge_id, pool_vseid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, POOL_RESOURCE, pool_vseid) return self.do_request(HTTP_GET, uri, decode=True) def update_pool(self, edge_id, pool_vseid, pool_new): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, POOL_RESOURCE, pool_vseid) return self.do_request(HTTP_PUT, uri, pool_new) def delete_pool(self, edge_id, pool_vseid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, POOL_RESOURCE, pool_vseid) return self.do_request(HTTP_DELETE, uri) def create_health_monitor(self, edge_id, monitor_new): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, MONITOR_RESOURCE) return self.do_request(HTTP_POST, uri, monitor_new) def get_health_monitor(self, 
edge_id, monitor_vseid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, MONITOR_RESOURCE, monitor_vseid) return self.do_request(HTTP_GET, uri, decode=True) def update_health_monitor(self, edge_id, monitor_vseid, monitor_new): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, MONITOR_RESOURCE, monitor_vseid) return self.do_request(HTTP_PUT, uri, monitor_new) def delete_health_monitor(self, edge_id, monitor_vseid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, MONITOR_RESOURCE, monitor_vseid) return self.do_request(HTTP_DELETE, uri) def create_app_profile(self, edge_id, app_profile): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, APP_PROFILE_RESOURCE) return self.do_request(HTTP_POST, uri, app_profile) def update_app_profile(self, edge_id, app_profileid, app_profile): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, APP_PROFILE_RESOURCE, app_profileid) return self.do_request(HTTP_PUT, uri, app_profile) def delete_app_profile(self, edge_id, app_profileid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, APP_PROFILE_RESOURCE, app_profileid) return self.do_request(HTTP_DELETE, uri) def create_app_rule(self, edge_id, app_rule): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, APP_RULE_RESOURCE) return self.do_request(HTTP_POST, uri, app_rule) def update_app_rule(self, edge_id, app_ruleid, app_rule): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, APP_RULE_RESOURCE, app_ruleid) return self.do_request(HTTP_PUT, uri, app_rule) def delete_app_rule(self, edge_id, app_ruleid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, APP_RULE_RESOURCE, app_ruleid) return self.do_request(HTTP_DELETE, uri) def update_ipsec_config(self, edge_id, ipsec_config): uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) return self.do_request(HTTP_PUT, uri, ipsec_config) def delete_ipsec_config(self, edge_id): uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) return 
self.do_request(HTTP_DELETE, uri) def get_ipsec_config(self, edge_id): uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) return self.do_request(HTTP_GET, uri) @retry_upon_exception(exceptions.RequestBad) def create_virtual_wire(self, vdn_scope_id, request): """Creates a VXLAN virtual wire The method will return the virtual wire ID. """ uri = '/api/2.0/vdn/scopes/%s/virtualwires' % vdn_scope_id return self.do_request(HTTP_POST, uri, request, format='xml', decode=False) def delete_virtual_wire(self, virtualwire_id): """Deletes a virtual wire.""" uri = '/api/2.0/vdn/virtualwires/%s' % virtualwire_id return self.do_request(HTTP_DELETE, uri, format='xml') def create_port_group(self, dvs_id, request): """Creates a port group on a DVS The method will return the port group ID. """ uri = '/api/2.0/xvs/switches/%s/networks' % dvs_id return self.do_request(HTTP_POST, uri, request, format='xml', decode=False) def delete_port_group(self, dvs_id, portgroup_id): """Deletes a portgroup.""" uri = '/api/2.0/xvs/switches/%s/networks/%s' % (dvs_id, portgroup_id) return self.do_request(HTTP_DELETE, uri, format='xml', decode=False) def get_vdn_switch(self, dvs_id): uri = '/api/2.0/vdn/switches/%s' % dvs_id return self.do_request(HTTP_GET, uri, decode=True) def update_vdn_switch(self, switch): uri = '/api/2.0/vdn/switches' return self.do_request(HTTP_PUT, uri, switch) def query_interface(self, edge_id, vnic_index): uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic_index) return self.do_request(HTTP_GET, uri, decode=True) def reconfigure_dhcp_service(self, edge_id, request_config): """Reconfigure dhcp static bindings in the created Edge.""" uri = "/api/4.0/edges/%s/dhcp/config" % edge_id return self.do_request(HTTP_PUT, uri, request_config) def query_dhcp_configuration(self, edge_id): """Query DHCP configuration from the specific edge.""" uri = "/api/4.0/edges/%s/dhcp/config" % edge_id return self.do_request(HTTP_GET, uri) def create_dhcp_binding(self, edge_id, request_config): 
"""Append one dhcp static binding on the edge.""" uri = self._build_uri_path(edge_id, DHCP_SERVICE, DHCP_BINDING_RESOURCE) return self.do_request(HTTP_POST, uri, request_config, decode=False) def delete_dhcp_binding(self, edge_id, binding_id): """Delete one dhcp static binding on the edge.""" uri = self._build_uri_path(edge_id, DHCP_SERVICE, DHCP_BINDING_RESOURCE, binding_id) return self.do_request(HTTP_DELETE, uri, decode=False) def get_dhcp_binding(self, edge_id, binding_id): """Get a dhcp static binding from the edge.""" uri = self._build_uri_path(edge_id, DHCP_SERVICE, DHCP_BINDING_RESOURCE, binding_id) return self.do_request(HTTP_GET, uri, decode=True) def create_security_group(self, request): """Creates a security group container in nsx. The method will return the security group ID. """ uri = '%s/globalroot-0' % (SECURITYGROUP_PREFIX) return self.do_request(HTTP_POST, uri, request, format='xml', decode=False) def delete_security_group(self, securitygroup_id): """Deletes a security group container.""" uri = '%s/%s?force=true' % (SECURITYGROUP_PREFIX, securitygroup_id) return self.do_request(HTTP_DELETE, uri, format='xml', decode=False) def update_security_group(self, sg_id, sg_name, description): """Updates the NSXv security group name.""" uri = '%s/%s' % (SECURITYGROUP_PREFIX, sg_id) h, c = self.do_request(HTTP_GET, uri, format='xml', decode=False) sg = et.fromstring(c) sg.find('name').text = sg_name sg.find('description').text = description return self.do_request(HTTP_PUT, uri, et.tostring(sg), format='xml', decode=False, encode=False) def list_security_groups(self): uri = '%s/scope/globalroot-0' % SECURITYGROUP_PREFIX return self.do_request(HTTP_GET, uri, format='xml', decode=False) def get_security_group_id(self, sg_name): """Returns NSXv security group id which match the given name.""" h, secgroups = self.list_security_groups() root = utils.normalize_xml(secgroups) for sg in root.iter('securitygroup'): if sg.find('name').text == sg_name: return 
sg.find('objectId').text @retry_upon_exception(exceptions.VcnsApiException) def create_bridge(self, edge_id, request): """Create a bridge.""" uri = self._build_uri_path(edge_id, BRIDGE) return self.do_request(HTTP_PUT, uri, request, format='xml', decode=False) @retry_upon_exception(exceptions.VcnsApiException) def delete_bridge(self, edge_id): """Delete a bridge.""" uri = self._build_uri_path(edge_id, BRIDGE) return self.do_request(HTTP_DELETE, uri, format='xml', decode=False) def create_redirect_section(self, request): """Creates a layer 3 redirect section in nsx rule table. The method will return the uri to newly created section. """ sec_type = FIREWALL_REDIRECT_SEC_TYPE uri = '%s/%s?autoSaveDraft=false' % (FIREWALL_PREFIX, sec_type) uri += '&operation=insert_before&anchorId=1002' return self.do_request(HTTP_POST, uri, request, format='xml', decode=False, encode=False) def create_section(self, type, request, insert_top=False, insert_before=None): """Creates a layer 3 or layer 2 section in nsx rule table. The method will return the uri to newly created section. """ if type == 'ip': sec_type = 'layer3sections' else: sec_type = 'layer2sections' uri = '%s/%s?autoSaveDraft=false' % (FIREWALL_PREFIX, sec_type) if insert_top: uri += '&operation=insert_top' # We want to place security-group sections before the default cluster # section, and we want to place the default cluster section before the # global default section. 
elif insert_before: uri += '&operation=insert_before&anchorId=%s' % insert_before else: uri += '&operation=insert_before&anchorId=1003' return self.do_request(HTTP_POST, uri, request, format='xml', decode=False, encode=False) def update_section(self, section_uri, request, h): """Replaces a section in nsx rule table.""" uri = '%s?autoSaveDraft=false' % section_uri headers = self._get_section_header(section_uri, h) return self.do_request(HTTP_PUT, uri, request, format='xml', decode=False, encode=False, headers=headers) def delete_section(self, section_uri): """Deletes a section in nsx rule table.""" uri = '%s?autoSaveDraft=false' % section_uri return self.do_request(HTTP_DELETE, uri, format='xml', decode=False) def get_section(self, section_uri): return self.do_request(HTTP_GET, section_uri, format='xml', decode=False) def get_default_l3_id(self): """Retrieve the id of the default l3 section.""" h, firewall_config = self.get_dfw_config() root = utils.normalize_xml(firewall_config) for child in root: if str(child.tag) == 'layer3Sections': sections = list(child.iter('section')) default = sections[-1] return default.attrib['id'] def get_dfw_config(self): uri = FIREWALL_PREFIX return self.do_request(HTTP_GET, uri, decode=False, format='xml') def update_dfw_config(self, request, h): uri = FIREWALL_PREFIX headers = self._get_section_header(None, h) return self.do_request(HTTP_PUT, uri, request, format='xml', decode=False, encode=False, headers=headers) def get_section_id(self, section_name): """Retrieve the id of a section from nsx.""" h, firewall_config = self.get_dfw_config() root = utils.normalize_xml(firewall_config) for sec in root.iter('section'): if sec.attrib['name'] == section_name: return sec.attrib['id'] def update_section_by_id(self, id, type, request): """Update a section while building its uri from the id.""" if type == 'ip': sec_type = 'layer3sections' else: sec_type = 'layer2sections' section_uri = '%s/%s/%s' % (FIREWALL_PREFIX, sec_type, id) 
self.update_section(section_uri, request, h=None) def _get_section_header(self, section_uri, h=None): if h is None: h, c = self.do_request(HTTP_GET, section_uri, format='xml', decode=False) etag = h['etag'] headers = {'If-Match': etag} return headers def remove_rule_from_section(self, section_uri, rule_id): """Deletes a rule from nsx section table.""" uri = '%s/rules/%s?autoSaveDraft=false' % (section_uri, rule_id) headers = self._get_section_header(section_uri) return self.do_request(HTTP_DELETE, uri, format='xml', headers=headers) @retry_upon_exception(exceptions.RequestBad) def add_member_to_security_group(self, security_group_id, member_id): """Adds a vnic member to nsx security group.""" uri = '%s/%s/members/%s?failIfExists=false' % ( SECURITYGROUP_PREFIX, security_group_id, member_id) return self.do_request(HTTP_PUT, uri, format='xml', decode=False) def remove_member_from_security_group(self, security_group_id, member_id): """Removes a vnic member from nsx security group.""" uri = '%s/%s/members/%s?failIfAbsent=false' % ( SECURITYGROUP_PREFIX, security_group_id, member_id) return self.do_request(HTTP_DELETE, uri, format='xml', decode=False) def set_system_control(self, edge_id, prop): uri = self._build_uri_path(edge_id, SYSCTL_SERVICE) payload = { 'featureType': 'systemcontrol', 'property': prop } return self.do_request(HTTP_PUT, uri, payload, decode=True) def get_system_control(self, edge_id): uri = self._build_uri_path(edge_id, SYSCTL_SERVICE) return self.do_request(HTTP_GET, uri) def _get_enforcement_point_body(self, enforcement_points): e_point_list = [] for enforcement_point in enforcement_points: e_point_list.append({ 'enforcementPoint': { 'id': enforcement_point, 'type': enforcement_point.split('-')[0] } }) return {'__enforcementPoints': e_point_list} @retry_upon_exception_exclude_error_codes( exceptions.RequestBad, [constants.NSX_ERROR_ALREADY_HAS_SG_POLICY]) def create_spoofguard_policy(self, enforcement_points, name, enable): uri = '%s/policies/' % 
SPOOFGUARD_PREFIX body = {'spoofguardPolicy': {'name': name, 'operationMode': 'MANUAL' if enable else 'DISABLE', 'allowLocalIPs': 'true'}} body['spoofguardPolicy'].update( self._get_enforcement_point_body(enforcement_points)) return self.do_request(HTTP_POST, uri, body, format='xml', encode=True, decode=False) @retry_upon_exception(exceptions.RequestBad) def update_spoofguard_policy(self, policy_id, enforcement_points, name, enable): update_uri = '%s/policies/%s' % (SPOOFGUARD_PREFIX, policy_id) publish_uri = '%s/%s?action=publish' % (SPOOFGUARD_PREFIX, policy_id) body = {'spoofguardPolicy': {'policyId': policy_id, 'name': name, 'operationMode': 'MANUAL' if enable else 'DISABLE', 'allowLocalIPs': 'true'}} body['spoofguardPolicy'].update( self._get_enforcement_point_body(enforcement_points)) self.do_request(HTTP_PUT, update_uri, body, format='xml', encode=True, decode=False) return self.do_request(HTTP_POST, publish_uri, decode=False) @retry_upon_exception(exceptions.RequestBad) def delete_spoofguard_policy(self, policy_id): uri = '%s/policies/%s' % (SPOOFGUARD_PREFIX, policy_id) return self.do_request(HTTP_DELETE, uri, decode=False) def get_spoofguard_policy(self, policy_id): uri = '%s/policies/%s' % (SPOOFGUARD_PREFIX, policy_id) return self.do_request(HTTP_GET, uri, decode=True) def get_spoofguard_policies(self): uri = '%s/policies/' % SPOOFGUARD_PREFIX return self.do_request(HTTP_GET, uri, decode=True) def _approve_assigned_addresses(self, policy_id, vnic_id, mac_addr, addresses): uri = '%s/%s' % (SPOOFGUARD_PREFIX, policy_id) addresses = [{'ipAddress': ip_addr} for ip_addr in addresses] body = {'spoofguardList': {'spoofguard': {'id': vnic_id, 'vnicUuid': vnic_id, 'approvedIpAddress': addresses, 'approvedMacAddress': mac_addr, 'publishedIpAddress': addresses, 'publishedMacAddress': mac_addr}}} try: return self.do_request(HTTP_POST, '%s?action=approve' % uri, body, format='xml', decode=False) except exceptions.VcnsApiException as e: nsx_errcode = 
self.xmlapi_client._get_nsx_errorcode(e.response) if nsx_errcode == constants.NSX_ERROR_ALREADY_EXISTS: LOG.warning("Spoofguard entry for %s already exists", vnic_id) raise exceptions.AlreadyExists(resource=vnic_id) # raise original exception for retries raise @retry_upon_exception(exceptions.RequestBad) def approve_assigned_addresses(self, policy_id, vnic_id, mac_addr, addresses): return self._approve_assigned_addresses( policy_id, vnic_id, mac_addr, addresses) @retry_upon_exception(exceptions.VcnsApiException) def publish_assigned_addresses(self, policy_id, vnic_id): uri = '%s/%s' % (SPOOFGUARD_PREFIX, policy_id) publish_vnic_uri = '%s?action=publish&vnicId=%s' % (uri, vnic_id) return self.do_request(HTTP_POST, publish_vnic_uri, decode=False) def inactivate_vnic_assigned_addresses(self, policy_id, vnic_id): try: self._approve_assigned_addresses(policy_id, vnic_id, '', []) except exceptions.RequestBad: LOG.debug("Request failed: inactivate vnic %s assigned addresses", vnic_id) else: return self.publish_assigned_addresses(policy_id, vnic_id) def _build_uri_path(self, edge_id, service, resource=None, resource_id=None, parent_resource_id=None, fields=None, relations=None, filters=None, types=None, is_attachment=False, is_async=False): uri_prefix = "%s/%s/%s" % (URI_PREFIX, edge_id, service) if resource: res_path = resource + (resource_id and "/%s" % resource_id or '') uri_path = "%s/%s" % (uri_prefix, res_path) else: uri_path = uri_prefix if is_async: return (uri_path + "?async=true") else: return uri_path def add_vm_to_exclude_list(self, vm_id): uri = '%s/%s' % (EXCLUDELIST_PREFIX, vm_id) return self.do_request(HTTP_PUT, uri) def delete_vm_from_exclude_list(self, vm_id): uri = '%s/%s' % (EXCLUDELIST_PREFIX, vm_id) return self.do_request(HTTP_DELETE, uri) def get_scoping_objects(self): uri = '%s/usermgmt/scopingobjects' % SERVICES_PREFIX h, scoping_objects = self.do_request(HTTP_GET, uri, decode=False, format='xml') return scoping_objects def 
_scopingobjects_lookup(self, type_names, object_id, name=None, use_cache=False): """Look for a specific object in the NSX scoping objects.""" # used cached scoping objects during plugin init since it is # a big structure to retrieve and parse each time. if use_cache and self._normalized_scoping_objects is not None: # Use the cached data root = self._normalized_scoping_objects else: # Not using cache, or we do want to use it, # but it was not saved yet: # So get the data from the NSX and parse it so_list = self.get_scoping_objects() root = utils.normalize_xml(so_list) # Save it for possible usage next time (even if not using cache) self._normalized_scoping_objects = root for obj in root.iter('object'): if (obj.find('objectTypeName').text in type_names and obj.find('objectId').text == object_id and (name is None or obj.find('name').text == name)): return True return False def validate_datacenter_moid(self, object_id, during_init=False): return self._scopingobjects_lookup(['Datacenter'], object_id, use_cache=during_init) def validate_network(self, object_id, during_init=False): return self._scopingobjects_lookup(NETWORK_TYPES, object_id, use_cache=during_init) def validate_network_name(self, object_id, name, during_init=False): return self._scopingobjects_lookup(NETWORK_TYPES, object_id, name=name, use_cache=during_init) def validate_vdn_scope(self, object_id): uri = '%s/scopes' % VDN_PREFIX h, scope_list = self.do_request(HTTP_GET, uri, decode=False, format='xml') root = utils.normalize_xml(scope_list) for obj_id in root.iter('objectId'): if obj_id.text == object_id: return True return False def get_dvs_list(self): uri = '%s/switches' % VDN_PREFIX h, dvs_list = self.do_request(HTTP_GET, uri, decode=False, format='xml') root = utils.normalize_xml(dvs_list) dvs_list = [] for obj_id in root.iter('objectId'): if obj_id.text: dvs_list.append(obj_id.text) return dvs_list def validate_dvs(self, object_id, dvs_list=None): if not dvs_list: dvs_list = self.get_dvs_list() for 
dvs in dvs_list: if dvs == object_id: return True return False def validate_inventory(self, object_id): uri = '%s/inventory/%s/basicinfo' % (SERVICES_PREFIX, object_id) try: h, c = self.do_request(HTTP_GET, uri, decode=False) except exceptions.ResourceNotFound: return False return True def get_inventory_name(self, object_id): uri = '%s/inventory/%s/basicinfo' % (SERVICES_PREFIX, object_id) h, c = self.do_request(HTTP_GET, uri, decode=True) return c['name'] def _get_version(self): uri = '/api/2.0/services/vsmconfig' h, c = self.do_request(HTTP_GET, uri, decode=True) version = c['version'] LOG.debug("NSX Version: %s", version) return version def get_version(self): if self._nsx_version is None: try: self._nsx_version = self._get_version() except Exception as e: # Versions prior to 6.2.0 do not support the above API LOG.error("Unable to get NSX version. Exception: %s", e) # Minimum supported version is 6.1 self._nsx_version = '6.1' return self._nsx_version def get_tuning_configration(self): uri = '/api/4.0/edgePublish/tuningConfiguration' h, c = self.do_request(HTTP_GET, uri, decode=True) return c def configure_aggregate_publishing(self): uri = "/api/4.0/edgePublish/tuningConfiguration" # Ensure that configured values are not changed config = self.get_tuning_configration() LOG.debug("Tuning configuration: %s", config) tuning = et.Element('tuningConfiguration') for opt, val in six.iteritems(config): child = et.Element(opt) if opt == 'aggregatePublishing': child.text = 'true' else: child.text = str(val) tuning.append(child) return self.do_request(HTTP_PUT, uri, et.tostring(tuning), format='xml', decode=True) def configure_reservations(self): uri = "/api/4.0/edgePublish/tuningConfiguration" config = self.get_tuning_configration() tuning = et.Element('tuningConfiguration') for opt, val in six.iteritems(config): child = et.Element(opt) if (opt == 'edgeVCpuReservationPercentage' or opt == 'edgeMemoryReservationPercentage'): child.text = '0' elif opt == 'megaHertzPerVCpu': 
child.text = '1500' else: child.text = str(val) tuning.append(child) return self.do_request(HTTP_PUT, uri, et.tostring(tuning), format='xml', decode=True) def enable_ha(self, edge_id, request_config): """Enable HA in the given edge.""" uri = "/api/4.0/edges/%s/highavailability/config" % edge_id return self.do_request(HTTP_PUT, uri, request_config) def change_edge_appliance_size(self, edge_id, size): """Change the size of edge appliances.""" uri = ("/api/4.0/edges/%s/appliances/?size=%s" % (edge_id, size)) return self.do_request(HTTP_POST, uri) def change_edge_appliance(self, edge_id, request): uri = "/api/4.0/edges/%s/appliances" % edge_id return self.do_request(HTTP_PUT, uri, request) def get_edge_appliances(self, edge_id): uri = "/api/4.0/edges/%s/appliances" % edge_id return self.do_request(HTTP_GET, uri) def upload_edge_certificate(self, edge_id, request): """Creates a certificate on the specified Edge appliance.""" uri = '%s/%s/%s' % (TRUSTSTORE_PREFIX, CERTIFICATE, edge_id) return self.do_request(HTTP_POST, uri, request, decode=True) def create_csr(self, edge_id, request=nsxv_constants.CSR_REQUEST): """Create a CSR on the specified Edge appliance.""" uri = '%s/%s/%s' % (TRUSTSTORE_PREFIX, CSR, edge_id) return self.do_request(HTTP_POST, uri, request, format='xml', decode=False) def create_csr_cert(self, csr_id): """Create a CSR self signed cert on the specified Edge appliance.""" uri = '%s/%s/%s?noOfDays=%s' % (TRUSTSTORE_PREFIX, CSR, csr_id, nsxv_constants.CERT_NUMBER_OF_DAYS) return self.do_request(HTTP_PUT, uri) def get_service_insertion_profile(self, profile_id): profiles_uri = '%s/%s' % (SERVICE_INSERTION_PROFILE_PREFIX, profile_id) return self.do_request(HTTP_GET, profiles_uri, format='xml', decode=False) def update_service_insertion_profile_binding(self, profile_id, request): profiles_uri = '%s/%s/%s' % (SERVICE_INSERTION_PROFILE_PREFIX, profile_id, 'binding') return self.do_request(HTTP_POST, profiles_uri, request, format='xml', decode=False) def 
create_ipam_ip_pool(self, request): uri = '%s/%s/%s' % (SERVICES_PREFIX, IPAM_POOL_SERVICE, IPAM_POOL_SCOPE) return self.do_request(HTTP_POST, uri, request, format='xml', decode=False) def delete_ipam_ip_pool(self, pool_id): uri = '%s/%s/%s' % (SERVICES_PREFIX, IPAM_POOL_SERVICE, pool_id) return self.do_request(HTTP_DELETE, uri) def get_ipam_ip_pool(self, pool_id): uri = '%s/%s/%s' % (SERVICES_PREFIX, IPAM_POOL_SERVICE, pool_id) return self.do_request(HTTP_GET, uri, decode=True) def allocate_ipam_ip_from_pool(self, pool_id, ip_addr=None): uri = '%s/%s/%s/%s' % (SERVICES_PREFIX, IPAM_POOL_SERVICE, pool_id, 'ipaddresses') if ip_addr: request = {'ipAddressRequest': {'allocationMode': 'RESERVE', 'ipAddress': ip_addr}} else: request = {'ipAddressRequest': {'allocationMode': 'ALLOCATE'}} return self.do_request(HTTP_POST, uri, request, format='xml', decode=False) def release_ipam_ip_to_pool(self, pool_id, ip_addr): uri = '%s/%s/%s/%s/%s' % (SERVICES_PREFIX, IPAM_POOL_SERVICE, pool_id, 'ipaddresses', ip_addr) return self.do_request(HTTP_DELETE, uri) def get_security_policy(self, policy_id, return_xml=True): # get the policy configuration as an xml string / dictionary uri = '%s/%s' % (SECURITY_POLICY_PREFIX, policy_id) if return_xml: format = 'xml' decode = False else: format = 'json' decode = True h, policy = self.do_request(HTTP_GET, uri, format=format, decode=decode) return policy def update_security_policy(self, policy_id, request): # update the policy configuration. 
request should be an xml string uri = '%s/%s' % (SECURITY_POLICY_PREFIX, policy_id) return self.do_request(HTTP_PUT, uri, request, format='xml', decode=False, encode=True) def get_security_policies(self): # get the policies configuration dictionary uri = '%s/all' % (SECURITY_POLICY_PREFIX) h, policies = self.do_request(HTTP_GET, uri, decode=True) return policies def list_applications(self): uri = '%s/scope/globalroot-0' % APPLICATION_PREFIX h, apps = self.do_request(HTTP_GET, uri, decode=True) return apps def update_edge_routing_config(self, edge_id, request_config): uri = self._build_uri_path(edge_id, ROUTING_CONFIG) return self.do_request(HTTP_PUT, uri, VcnsApiClient.xmldumps(request_config), format='xml') def get_edge_routing_config(self, edge_id): uri = self._build_uri_path(edge_id, ROUTING_CONFIG) return self.do_request(HTTP_GET, uri) def update_bgp_dynamic_routing(self, edge_id, bgp_request): uri = self._build_uri_path(edge_id, BGP_ROUTING_CONFIG) return self.do_request(HTTP_PUT, uri, VcnsApiClient.xmldumps(bgp_request), format='xml') def get_bgp_routing_config(self, edge_id): uri = self._build_uri_path(edge_id, BGP_ROUTING_CONFIG) return self.do_request(HTTP_GET, uri) def delete_bgp_routing_config(self, edge_id): uri = self._build_uri_path(edge_id, BGP_ROUTING_CONFIG) return self.do_request(HTTP_DELETE, uri) def get_global_objects(self): uri = '%s/application/scope/globalroot-0' % SERVICES_PREFIX h, scoping_objects = self.do_request(HTTP_GET, uri, decode=False, format='xml') return scoping_objects def _globalobjects_lookup(self, name, use_cache=False): """Return objectId a specific name in the NSX global objects.""" # used cached scoping objects during plugin init since it is # a big structure to retrieve and parse each time. 
if use_cache and self._normalized_global_objects is not None: # Use the cached data root = self._normalized_global_objects else: # Not using cache, or we do want to use it, # but it was not saved yet: # So get the data from the NSX and parse it so_list = self.get_global_objects() root = utils.normalize_xml(so_list) # Save it for possible usage next time (even if not using cache) self._normalized_global_objects = root for obj in root.iter('application'): if obj.find('name').text == name: return obj.find('objectId').text def get_application_id(self, name): return self._globalobjects_lookup(name, use_cache=True) vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/securitygroup_utils.py0000666000175100017510000002021013244523345027113 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import xml.etree.ElementTree as et from oslo_log import log as logging from vmware_nsx.common import utils WAIT_INTERVAL = 2000 MAX_ATTEMPTS = 5 LOG = logging.getLogger(__name__) class NsxSecurityGroupUtils(object): def __init__(self, nsxv_manager): LOG.debug("Start Security Group Utils initialization") self.nsxv_manager = nsxv_manager def to_xml_string(self, element): return et.tostring(element) def get_section_with_rules(self, name, rules, section_id=None): """Helper method to create section dict with rules.""" section = et.Element('section') section.attrib['name'] = name if section_id: section.attrib['id'] = section_id for rule in rules: section.append(rule) return section def get_container(self, nsx_sg_id): container = {'type': 'SecurityGroup', 'value': nsx_sg_id} return container def get_remote_container(self, remote_group_id, remote_ip_mac): container = None if remote_group_id is not None: return self.get_container(remote_group_id) if remote_ip_mac is not None: container = {'type': 'Ipv4Address', 'value': remote_ip_mac} return container def get_rule_config(self, applied_to_ids, name, action='allow', applied_to='SecurityGroup', source=None, destination=None, services=None, flags=None, logged=False, tag=None, application_services=None): """Helper method to create a nsx rule dict.""" ruleTag = et.Element('rule') ruleTag.attrib['logged'] = 'true' if logged else 'false' nameTag = et.SubElement(ruleTag, 'name') nameTag.text = name actionTag = et.SubElement(ruleTag, 'action') actionTag.text = action apList = et.SubElement(ruleTag, 'appliedToList') for applied_to_id in applied_to_ids: apTag = et.SubElement(apList, 'appliedTo') apTypeTag = et.SubElement(apTag, 'type') apTypeTag.text = applied_to apValueTag = et.SubElement(apTag, 'value') apValueTag.text = applied_to_id if source is not None: sources = et.SubElement(ruleTag, 'sources') sources.attrib['excluded'] = 'false' srcTag = et.SubElement(sources, 'source') srcTypeTag = et.SubElement(srcTag, 'type') 
srcTypeTag.text = source['type'] srcValueTag = et.SubElement(srcTag, 'value') srcValueTag.text = source['value'] if destination is not None: dests = et.SubElement(ruleTag, 'destinations') dests.attrib['excluded'] = 'false' destTag = et.SubElement(dests, 'destination') destTypeTag = et.SubElement(destTag, 'type') destTypeTag.text = destination['type'] destValueTag = et.SubElement(destTag, 'value') destValueTag.text = destination['value'] if services: s = et.SubElement(ruleTag, 'services') for protocol, port, icmptype, icmpcode in services: svcTag = et.SubElement(s, 'service') try: int(protocol) svcProtocolTag = et.SubElement(svcTag, 'protocol') svcProtocolTag.text = str(protocol) except ValueError: svcProtocolTag = et.SubElement(svcTag, 'protocolName') svcProtocolTag.text = protocol if port is not None: svcPortTag = et.SubElement(svcTag, 'destinationPort') svcPortTag.text = str(port) if icmptype is not None: svcPortTag = et.SubElement(svcTag, 'subProtocol') svcPortTag.text = str(icmptype) if icmpcode is not None: svcPortTag = et.SubElement(svcTag, 'icmpCode') svcPortTag.text = str(icmpcode) if application_services: s = et.SubElement(ruleTag, 'services') for application_service in application_services: svcTag = et.SubElement(s, 'service') svcProtocolTag = et.SubElement(svcTag, 'value') svcProtocolTag.text = str(application_service) if flags: if flags.get('ethertype') is not None: pktTag = et.SubElement(ruleTag, 'packetType') pktTag.text = flags.get('ethertype') if flags.get('direction') is not None: dirTag = et.SubElement(ruleTag, 'direction') dirTag.text = flags.get('direction') if tag: tagTag = et.SubElement(ruleTag, 'tag') tagTag.text = tag return ruleTag def get_rule_id_pair_from_section(self, resp): root = et.fromstring(resp) pairs = [] for rule in root.findall('rule'): pair = {'nsx_id': rule.attrib.get('id'), 'neutron_id': rule.find('name').text} pairs.append(pair) return pairs def extend_section_with_rules(self, section, nsx_rules): section.extend(nsx_rules) 
def parse_section(self, xml_string): return et.fromstring(xml_string) def get_nsx_sg_name(self, sg_data): return '%(name)s (%(id)s)' % sg_data def get_nsx_section_name(self, sg_data): return 'SG Section: %s' % self.get_nsx_sg_name(sg_data) def parse_and_get_section_id(self, section_xml): section = et.fromstring(section_xml) return section.attrib['id'] def is_section_logged(self, section): # Determine if this section rules are being logged by the first rule # 'logged' value. rule = section.find('rule') if rule is not None: return rule.attrib.get('logged') == 'true' return False def set_rules_logged_option(self, section, logged): value = 'true' if logged else 'false' rules = section.findall('rule') updated = False for rule in rules: if rule.attrib['logged'] != value: rule.attrib['logged'] = value updated = True return updated def del_nsx_security_group_from_policy(self, policy_id, sg_id): if not policy_id: return policy = self.nsxv_manager.vcns.get_security_policy(policy_id) policy = utils.normalize_xml(policy) # check if the security group is already bounded to the policy for binding in policy.iter('securityGroupBinding'): if binding.find('objectId').text == sg_id: # delete this entry policy.remove(binding) return self.nsxv_manager.vcns.update_security_policy( policy_id, et.tostring(policy)) def add_nsx_security_group_to_policy(self, policy_id, sg_id): if not policy_id: return # Get the policy configuration policy = self.nsxv_manager.vcns.get_security_policy(policy_id) policy = utils.normalize_xml(policy) # check if the security group is already bounded to the policy for binding in policy.iter('securityGroupBinding'): if binding.find('objectId').text == sg_id: # Already there return # Add a new binding entry new_binding = et.SubElement(policy, 'securityGroupBinding') et.SubElement(new_binding, 'objectId').text = sg_id return self.nsxv_manager.vcns.update_security_policy( policy_id, et.tostring(policy)) 
vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/common/0000775000175100017510000000000013244524600023663 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/common/__init__.py0000666000175100017510000000000013244523345025771 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/common/constants.py0000666000175100017510000000526113244523345026264 0ustar zuulzuul00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from vmware_nsx.common import nsxv_constants EDGE_ID = 'edge_id' ROUTER_ID = 'router_id' DHCP_EDGE_PREFIX = 'dhcp-' PLR_EDGE_PREFIX = 'plr-' BACKUP_ROUTER_PREFIX = 'backup-' EDGE_NAME_LEN = 20 # Interface EXTERNAL_VNIC_INDEX = 0 INTERNAL_VNIC_INDEX = 1 EXTERNAL_VNIC_NAME = "external" INTERNAL_VNIC_NAME = "internal" MAX_VNIC_NUM = 10 # we can add at most 8 interfaces on service edge. Other two interfaces # are used for metadata and external network access. 
MAX_INTF_NUM = 8 MAX_TUNNEL_NUM = (cfg.CONF.nsxv.maximum_tunnels_per_vnic if (cfg.CONF.nsxv.maximum_tunnels_per_vnic < 110 and cfg.CONF.nsxv.maximum_tunnels_per_vnic > 0) else 10) # SNAT rule location PREPEND = 0 APPEND = -1 # error code NSX_ERROR_ALREADY_EXISTS = 210 VCNS_ERROR_CODE_EDGE_NOT_RUNNING = 10013 NSX_ERROR_DHCP_OVERLAPPING_IP = 12501 NSX_ERROR_DHCP_DUPLICATE_HOSTNAME = 12504 NSX_ERROR_DHCP_DUPLICATE_MAC = 12518 NSX_ERROR_IPAM_ALLOCATE_ALL_USED = 120051 NSX_ERROR_IPAM_ALLOCATE_IP_USED = 120056 NSX_ERROR_ALREADY_HAS_SG_POLICY = 120508 SUFFIX_LENGTH = 8 #Edge size SERVICE_SIZE_MAPPING = { 'router': nsxv_constants.COMPACT, 'dhcp': nsxv_constants.COMPACT, 'lb': nsxv_constants.COMPACT } ALLOWED_EDGE_SIZES = (nsxv_constants.COMPACT, nsxv_constants.LARGE, nsxv_constants.XLARGE, nsxv_constants.QUADLARGE) #Edge type ALLOWED_EDGE_TYPES = (nsxv_constants.SERVICE_EDGE, nsxv_constants.VDR_EDGE) SUPPORTED_DHCP_OPTIONS = { 'interface-mtu': 'option26', 'tftp-server-name': 'option66', 'bootfile-name': 'option67', 'classless-static-route': 'option121', 'tftp-server-address': 'option150', 'tftp-server': 'option150', } # router status by number class RouterStatus(object): ROUTER_STATUS_ACTIVE = 0 ROUTER_STATUS_DOWN = 1 ROUTER_STATUS_PENDING_CREATE = 2 ROUTER_STATUS_PENDING_DELETE = 3 ROUTER_STATUS_ERROR = 4 class InternalEdgePurposes(object): INTER_EDGE_PURPOSE = 'inter_edge_net' vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/common/VcnsApiClient.py0000666000175100017510000001352113244523345026750 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import os import xml.etree.ElementTree as et from oslo_context import context as context_utils from oslo_serialization import jsonutils import requests import six from vmware_nsx.plugins.nsx_v.vshield.common import exceptions def _xmldump(obj): """Sort of improved xml creation method. This converts the dict to xml with following assumptions: Keys starting with _(underscore) are to be used as attributes and not element keys starting with @ so that dict can be made. Keys starting with __(double underscore) are to be skipped and its value is processed. The keys are not part of any xml schema. """ config = "" attr = "" if isinstance(obj, dict): for key, value in six.iteritems(obj): if key.startswith('__'): # Skip the key and evaluate it's value. a, x = _xmldump(value) config += x elif key.startswith('_'): attr += ' %s="%s"' % (key[1:], value) else: a, x = _xmldump(value) if key.startswith('@'): cfg = "%s" % (x) else: cfg = "<%s%s>%s" % (key, a, x, key) config += cfg elif isinstance(obj, list): for value in obj: a, x = _xmldump(value) attr += a config += x else: config = obj return attr, config def xmldumps(obj): attr, xml = _xmldump(obj) return xml class VcnsApiHelper(object): errors = { 303: exceptions.ResourceRedirect, 400: exceptions.RequestBad, 403: exceptions.Forbidden, 404: exceptions.ResourceNotFound, 409: exceptions.ServiceConflict, 415: exceptions.MediaTypeUnsupport, 503: exceptions.ServiceUnavailable } nsx_errors = { # firewall rule doesn't exists for deletion. 
100046: exceptions.ResourceNotFound, 100029: exceptions.ResourceNotFound, } def __init__(self, address, user, password, format='json', ca_file=None, insecure=True, timeout=None): self.authToken = base64.encodestring(six.b("%s:%s" % (user, password))) self.user = user self.passwd = password self.address = address self.format = format self.timeout = timeout if format == 'json': self.encode = jsonutils.dumps else: self.encode = xmldumps if insecure: self.verify_cert = False else: if ca_file: self.verify_cert = ca_file else: self.verify_cert = True self._session = None self._pid = None @property def session(self): if self._session is None or self._pid != os.getpid(): self._pid = os.getpid() self._session = requests.Session() return self._session def _get_nsx_errorcode(self, content): try: if self.format == 'xml': error = et.fromstring(content).find('errorCode') errcode = error is not None and int(error.text) else: # json error = jsonutils.loads(content) errcode = int(error.get('errorCode')) return errcode except (TypeError, ValueError, et.ParseError): # We won't assume that integer error-code value is guaranteed. 
return None def _get_request_id(self): ctx = context_utils.get_current() if ctx: return ctx.__dict__.get('request_id') def request(self, method, uri, params=None, headers=None, encodeparams=True, timeout=None): uri = self.address + uri if timeout is None: timeout = self.timeout if headers is None: headers = {} headers['Accept'] = 'application/' + self.format headers['Authorization'] = 'Basic ' + self.authToken.strip() headers['Content-Type'] = 'application/' + self.format request_id = self._get_request_id() if request_id: headers['TicketNumber'] = request_id if params: if encodeparams is True: data = self.encode(params) else: data = params else: data = None try: response = self.session.request(method, uri, verify=self.verify_cert, data=data, headers=headers, timeout=timeout) except requests.exceptions.Timeout: raise exceptions.ResourceTimedOut(uri=uri) status = response.status_code if 200 <= status < 300: return response.headers, response.text nsx_errcode = self._get_nsx_errorcode(response.text) if nsx_errcode in self.nsx_errors: cls = self.nsx_errors[nsx_errcode] elif status in self.errors: cls = self.errors[status] else: cls = exceptions.VcnsApiException raise cls(uri=uri, status=status, header=response.headers, response=response.text) vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/common/exceptions.py0000666000175100017510000000427613244523345026436 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import exceptions from vmware_nsx._i18n import _ class VcnsException(exceptions.NeutronException): pass class VcnsGeneralException(VcnsException): def __init__(self, message): self.message = message super(VcnsGeneralException, self).__init__() class VcnsBadRequest(exceptions.BadRequest): pass class VcnsNotFound(exceptions.NotFound): message = _('%(resource)s not found: %(msg)s') class VcnsApiException(VcnsException): message = _("An unknown exception %(status)s occurred: %(response)s.") def __init__(self, **kwargs): super(VcnsApiException, self).__init__(**kwargs) self.status = kwargs.get('status') self.header = kwargs.get('header') self.response = kwargs.get('response') class ResourceRedirect(VcnsApiException): message = _("Resource %(uri)s has been redirected") class RequestBad(VcnsApiException): message = _("Request %(uri)s is Bad, response %(response)s") class Forbidden(VcnsApiException): message = _("Forbidden: %(uri)s") class ResourceNotFound(VcnsApiException): message = _("Resource %(uri)s not found") class ResourceTimedOut(VcnsApiException): message = _("Resource %(uri)s timed out") class MediaTypeUnsupport(VcnsApiException): message = _("Media Type %(uri)s is not supported") class ServiceUnavailable(VcnsApiException): message = _("Service Unavailable: %(uri)s") class ServiceConflict(VcnsApiException): message = _("Concurrent object access error: %(uri)s") class AlreadyExists(VcnsApiException): message = _("Resource %(resource)s already exists") vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/edge_appliance_driver.py0000666000175100017510000010354313244523345027255 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from distutils import version import random import time from neutron_lib import constants as lib_const from neutron_lib import context as q_context from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from sqlalchemy.orm import exc as sa_exc from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import constants from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.plugins.nsx_v.vshield.tasks import ( constants as task_constants) from vmware_nsx.plugins.nsx_v.vshield.tasks import tasks LOG = logging.getLogger(__name__) class EdgeApplianceDriver(object): def __init__(self): super(EdgeApplianceDriver, self).__init__() # store the last task per edge that has the latest config self.updated_task = { 'nat': {}, 'route': {}, } random.seed() def _assemble_edge(self, name, appliance_size="compact", deployment_container_id=None, datacenter_moid=None, enable_aesni=True, dist=False, enable_fips=False, remote_access=False, edge_ha=False): edge = { 'name': name, 'fqdn': None, 'enableAesni': enable_aesni, 'enableFips': enable_fips, 'featureConfigs': { 'features': [ { 'featureType': 'firewall_4.0', 'globalConfig': { 'tcpTimeoutEstablished': 7200 } } ] }, 'cliSettings': { 'remoteAccess': remote_access }, 'autoConfiguration': { 
'enabled': False, 'rulePriority': 'high' }, 'appliances': { 'applianceSize': appliance_size }, } if not dist: edge['type'] = "gatewayServices" edge['vnics'] = {'vnics': []} else: edge['type'] = "distributedRouter" edge['interfaces'] = {'interfaces': []} if deployment_container_id: edge['appliances']['deploymentContainerId'] = ( deployment_container_id) if datacenter_moid: edge['datacenterMoid'] = datacenter_moid if not dist and edge_ha: self._enable_high_availability(edge) return edge def _select_datastores(self, availability_zone): primary_ds = availability_zone.datastore_id secondary_ds = availability_zone.ha_datastore_id if availability_zone.ha_placement_random: # we want to switch primary and secondary datastores # half of the times, to balance it if random.random() > 0.5: primary_ds = availability_zone.ha_datastore_id secondary_ds = availability_zone.datastore_id return primary_ds, secondary_ds def _assemble_edge_appliances(self, availability_zone): appliances = [] if availability_zone.ha_datastore_id and availability_zone.edge_ha: # create appliance with HA primary_ds, secondary_ds = self._select_datastores( availability_zone) appliances.append(self._assemble_edge_appliance( availability_zone.resource_pool, primary_ds)) appliances.append(self._assemble_edge_appliance( availability_zone.resource_pool, secondary_ds)) elif availability_zone.datastore_id: # Single datastore appliances.append(self._assemble_edge_appliance( availability_zone.resource_pool, availability_zone.datastore_id)) return appliances def _assemble_edge_appliance(self, resource_pool_id, datastore_id): appliance = {} if resource_pool_id: appliance['resourcePoolId'] = resource_pool_id if datastore_id: appliance['datastoreId'] = datastore_id return appliance def _assemble_edge_vnic(self, name, index, portgroup_id, tunnel_index=-1, primary_address=None, subnet_mask=None, secondary=None, type="internal", enable_proxy_arp=False, enable_send_redirects=True, is_connected=True, mtu=1500, 
address_groups=None): vnic = { 'index': index, 'name': name, 'type': type, 'portgroupId': portgroup_id, 'mtu': mtu, 'enableProxyArp': enable_proxy_arp, 'enableSendRedirects': enable_send_redirects, 'isConnected': is_connected } if address_groups is None: address_groups = [] if not address_groups: if primary_address and subnet_mask: address_group = { 'primaryAddress': primary_address, 'subnetMask': subnet_mask } if secondary: address_group['secondaryAddresses'] = { 'ipAddress': secondary, 'type': 'secondary_addresses' } vnic['addressGroups'] = { 'addressGroups': [address_group] } else: vnic['subInterfaces'] = {'subInterfaces': address_groups} else: if tunnel_index < 0: vnic['addressGroups'] = {'addressGroups': address_groups} else: vnic['subInterfaces'] = {'subInterfaces': address_groups} return vnic def _assemble_vdr_interface(self, portgroup_id, primary_address=None, subnet_mask=None, secondary=None, type="internal", is_connected=True, mtu=1500, address_groups=None): interface = { 'type': type, 'connectedToId': portgroup_id, 'mtu': mtu, 'isConnected': is_connected } if address_groups is None: address_groups = [] if not address_groups: if primary_address and subnet_mask: address_group = { 'primaryAddress': primary_address, 'subnetMask': subnet_mask } if secondary: address_group['secondaryAddresses'] = { 'ipAddress': secondary, 'type': 'secondary_addresses' } interface['addressGroups'] = { 'addressGroups': [address_group] } else: interface['addressGroups'] = {'addressGroups': address_groups} interfaces = {'interfaces': [interface]} return interfaces def _edge_status_to_level(self, status): if status == 'GREEN': status_level = constants.RouterStatus.ROUTER_STATUS_ACTIVE elif status in ('GREY', 'YELLOW'): status_level = constants.RouterStatus.ROUTER_STATUS_DOWN else: status_level = constants.RouterStatus.ROUTER_STATUS_ERROR return status_level def _enable_loadbalancer(self, edge): if (not edge.get('featureConfigs') or not edge['featureConfigs'].get('features')): 
edge['featureConfigs'] = {'features': []} edge['featureConfigs']['features'].append( {'featureType': 'loadbalancer_4.0', 'enabled': True}) def _enable_high_availability(self, edge): if (not edge.get('featureConfigs') or not edge['featureConfigs'].get('features')): edge['featureConfigs'] = {'features': []} edge['featureConfigs']['features'].append( {'featureType': 'highavailability_4.0', 'enabled': True}) def get_edge_status(self, edge_id): try: response = self.vcns.get_edge_status(edge_id)[1] status_level = self._edge_status_to_level( response['edgeStatus']) except exceptions.VcnsApiException as e: LOG.error("VCNS: Failed to get edge %(edge_id)s status: " "Reason: %(reason)s", {'edge_id': edge_id, 'reason': e.response}) status_level = constants.RouterStatus.ROUTER_STATUS_ERROR try: desc = jsonutils.loads(e.response) if desc.get('errorCode') == ( constants.VCNS_ERROR_CODE_EDGE_NOT_RUNNING): status_level = constants.RouterStatus.ROUTER_STATUS_DOWN except ValueError: LOG.error('Error code not present. 
%s', e.response) return status_level def get_interface(self, edge_id, vnic_index): # get vnic interface address groups try: return self.vcns.query_interface(edge_id, vnic_index) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("NSXv: Failed to query vnic %s", vnic_index) def update_interface(self, router_id, edge_id, index, network, tunnel_index=-1, address=None, netmask=None, secondary=None, is_connected=True, address_groups=None): LOG.debug("VCNS: update vnic %(index)d: %(addr)s %(netmask)s", { 'index': index, 'addr': address, 'netmask': netmask}) if index == constants.EXTERNAL_VNIC_INDEX: name = constants.EXTERNAL_VNIC_NAME intf_type = 'uplink' else: name = constants.INTERNAL_VNIC_NAME + str(index) if tunnel_index < 0: intf_type = 'internal' else: intf_type = 'trunk' config = self._assemble_edge_vnic( name, index, network, tunnel_index, address, netmask, secondary, type=intf_type, address_groups=address_groups, is_connected=is_connected) self.vcns.update_interface(edge_id, config) def add_vdr_internal_interface(self, edge_id, network, address=None, netmask=None, secondary=None, address_groups=None, type="internal", is_connected=True): LOG.debug("Add VDR interface on edge: %s", edge_id) if address_groups is None: address_groups = [] interface_req = ( self._assemble_vdr_interface(network, address, netmask, secondary, address_groups=address_groups, is_connected=is_connected, type=type)) self.vcns.add_vdr_internal_interface(edge_id, interface_req) header, response = self.vcns.get_edge_interfaces(edge_id) for interface in response['interfaces']: if interface['connectedToId'] == network: vnic_index = int(interface['index']) return vnic_index def update_vdr_internal_interface(self, edge_id, index, network, address_groups=None, is_connected=True): if not address_groups: address_groups = [] interface = { 'type': 'internal', 'connectedToId': network, 'mtu': 1500, 'isConnected': is_connected, 'addressGroups': {'addressGroup': 
address_groups} } interface_req = {'interface': interface} try: header, response = self.vcns.update_vdr_internal_interface( edge_id, index, interface_req) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to update vdr interface on edge: " "%s", edge_id) def delete_vdr_internal_interface(self, edge_id, interface_index): LOG.debug("Delete VDR interface on edge: %s", edge_id) try: header, response = self.vcns.delete_vdr_internal_interface( edge_id, interface_index) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to delete vdr interface on edge: " "%s", edge_id) def delete_interface(self, router_id, edge_id, index): LOG.debug("Deleting vnic %(vnic_index)s: on edge %(edge_id)s", {'vnic_index': index, 'edge_id': edge_id}) try: self.vcns.delete_interface(edge_id, index) except exceptions.ResourceNotFound: LOG.error('Failed to delete vnic %(vnic_index)s on edge ' '%(edge_id)s: edge was not found', {'vnic_index': index, 'edge_id': edge_id}) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to delete vnic %(vnic_index)s: " "on edge %(edge_id)s", {'vnic_index': index, 'edge_id': edge_id}) LOG.debug("Deletion complete vnic %(vnic_index)s: on edge %(edge_id)s", {'vnic_index': index, 'edge_id': edge_id}) def deploy_edge(self, context, router_id, name, internal_network, dist=False, loadbalancer_enable=True, appliance_size=nsxv_constants.LARGE, availability_zone=None, deploy_metadata=False): edge_name = name edge = self._assemble_edge( edge_name, datacenter_moid=availability_zone.datacenter_moid, deployment_container_id=self.deployment_container_id, appliance_size=appliance_size, remote_access=False, dist=dist, edge_ha=availability_zone.edge_ha) appliances = self._assemble_edge_appliances(availability_zone) if appliances: edge['appliances']['appliances'] = appliances if not dist: vnic_external = self._assemble_edge_vnic( 
constants.EXTERNAL_VNIC_NAME, constants.EXTERNAL_VNIC_INDEX, availability_zone.external_network, type="uplink") edge['vnics']['vnics'].append(vnic_external) else: edge['mgmtInterface'] = { 'connectedToId': availability_zone.external_network, 'name': "mgmtInterface"} if internal_network: vnic_inside = self._assemble_edge_vnic( constants.INTERNAL_VNIC_NAME, constants.INTERNAL_VNIC_INDEX, internal_network, edge_utils.get_vdr_transit_network_plr_address(), edge_utils.get_vdr_transit_network_netmask(), type="internal") edge['vnics']['vnics'].append(vnic_inside) # If default login credentials for Edge are set, configure accordingly if (cfg.CONF.nsxv.edge_appliance_user and cfg.CONF.nsxv.edge_appliance_password): edge['cliSettings'].update({ 'userName': cfg.CONF.nsxv.edge_appliance_user, 'password': cfg.CONF.nsxv.edge_appliance_password}) if not dist and loadbalancer_enable: self._enable_loadbalancer(edge) edge_id = None try: header = self.vcns.deploy_edge(edge)[0] edge_id = header.get('location', '/').split('/')[-1] if edge_id: nsxv_db.update_nsxv_router_binding( context.session, router_id, edge_id=edge_id) if not dist: # Init Edge vnic binding nsxv_db.init_edge_vnic_binding( context.session, edge_id) else: if router_id: nsxv_db.update_nsxv_router_binding( context.session, router_id, status=lib_const.ERROR) error = _('Failed to deploy edge') raise nsxv_exc.NsxPluginException(err_msg=error) self.callbacks.complete_edge_creation( context, edge_id, name, router_id, dist, True, availability_zone=availability_zone, deploy_metadata=deploy_metadata) except exceptions.VcnsApiException: self.callbacks.complete_edge_creation( context, edge_id, name, router_id, dist, False, availability_zone=availability_zone) with excutils.save_and_reraise_exception(): LOG.exception("NSXv: deploy edge failed.") return edge_id def update_edge(self, context, router_id, edge_id, name, internal_network, dist=False, loadbalancer_enable=True, appliance_size=nsxv_constants.LARGE, set_errors=False, 
availability_zone=None): """Update edge name.""" edge = self._assemble_edge( name, datacenter_moid=availability_zone.datacenter_moid, deployment_container_id=self.deployment_container_id, appliance_size=appliance_size, remote_access=False, dist=dist, edge_ha=availability_zone.edge_ha) edge['id'] = edge_id appliances = self._assemble_edge_appliances(availability_zone) if appliances: edge['appliances']['appliances'] = appliances if not dist: vnic_external = self._assemble_edge_vnic( constants.EXTERNAL_VNIC_NAME, constants.EXTERNAL_VNIC_INDEX, availability_zone.external_network, type="uplink") edge['vnics']['vnics'].append(vnic_external) else: edge['mgmtInterface'] = { 'connectedToId': availability_zone.external_network, 'name': "mgmtInterface"} if internal_network: internal_vnic = self._assemble_edge_vnic( constants.INTERNAL_VNIC_NAME, constants.INTERNAL_VNIC_INDEX, internal_network, edge_utils.get_vdr_transit_network_plr_address(), edge_utils.get_vdr_transit_network_netmask(), type="internal") edge['vnics']['vnics'].append(internal_vnic) if not dist and loadbalancer_enable: self._enable_loadbalancer(edge) try: self.vcns.update_edge(edge_id, edge) self.callbacks.complete_edge_update( context, edge_id, router_id, True, set_errors) except exceptions.VcnsApiException as e: LOG.error("Failed to update edge: %s", e.response) self.callbacks.complete_edge_update( context, edge_id, router_id, False, set_errors) return False return True def rename_edge(self, edge_id, name): """rename edge.""" try: # First get the current edge structure # [0] is the status, [1] is the body edge = self.vcns.get_edge(edge_id)[1] if edge['name'] == name: LOG.debug('Edge %s is already named %s', edge_id, name) return # remove some data that will make the update fail edge_utils.remove_irrelevant_keys_from_edge_request(edge) # set the new name in the request edge['name'] = name # update the edge self.vcns.update_edge(edge_id, edge) except exceptions.VcnsApiException as e: LOG.error("Failed to rename 
edge: %s", e.response) def resize_edge(self, edge_id, size): """update the size of a router edge.""" try: # First get the current edge structure # [0] is the status, [1] is the body edge = self.vcns.get_edge(edge_id)[1] if edge.get('appliances'): if edge['appliances']['applianceSize'] == size: LOG.debug('Edge %s is already with size %s', edge_id, size) return ver = self.vcns.get_version() if version.LooseVersion(ver) < version.LooseVersion('6.2.3'): # remove some data that will make the update fail edge_utils.remove_irrelevant_keys_from_edge_request(edge) # set the new size in the request edge['appliances']['applianceSize'] = size # update the edge self.vcns.update_edge(edge_id, edge) except exceptions.VcnsApiException as e: LOG.error("Failed to resize edge: %s", e.response) def delete_edge(self, context, router_id, edge_id, dist=False): LOG.debug("Deleting edge %s", edge_id) if context is None: context = q_context.get_admin_context() try: LOG.debug("Deleting router binding %s", router_id) nsxv_db.delete_nsxv_router_binding(context.session, router_id) if not dist: LOG.debug("Deleting vnic bindings for edge %s", edge_id) nsxv_db.clean_edge_vnic_binding(context.session, edge_id) except sa_exc.NoResultFound: LOG.warning("Router Binding for %s not found", router_id) if edge_id: try: self.vcns.delete_edge(edge_id) return True except exceptions.ResourceNotFound: return True except exceptions.VcnsApiException as e: LOG.exception("VCNS: Failed to delete %(edge_id)s:\n" "%(response)s", {'edge_id': edge_id, 'response': e.response}) return False except Exception: LOG.exception("VCNS: Failed to delete %s", edge_id) return False def _assemble_nat_rule(self, action, original_address, translated_address, vnic_index=None, enabled=True, protocol='any', original_port='any', translated_port='any'): nat_rule = {} nat_rule['action'] = action if vnic_index is not None: nat_rule['vnic'] = vnic_index nat_rule['originalAddress'] = original_address nat_rule['translatedAddress'] = 
translated_address nat_rule['enabled'] = enabled nat_rule['protocol'] = protocol nat_rule['originalPort'] = original_port nat_rule['translatedPort'] = translated_port return nat_rule def get_nat_config(self, edge_id): try: return self.vcns.get_nat_config(edge_id)[1] except exceptions.VcnsApiException as e: LOG.exception("VCNS: Failed to get nat config:\n%s", e.response) raise e def update_nat_rules(self, edge_id, snats, dnats, indices=None): LOG.debug("VCNS: update nat rule\n" "SNAT:%(snat)s\n" "DNAT:%(dnat)s\n" "INDICES: %(index)s\n", { 'snat': snats, 'dnat': dnats, 'index': indices}) nat_rules = [] for dnat in dnats: vnic_index = None if 'vnic_index' in dnat: vnic_index = dnat['vnic_index'] if vnic_index or not indices: # we are adding a predefined index or # adding to all interfaces nat_rules.append(self._assemble_nat_rule( 'dnat', dnat['dst'], dnat['translated'], vnic_index=vnic_index )) nat_rules.append(self._assemble_nat_rule( 'snat', dnat['translated'], dnat['dst'], vnic_index=vnic_index )) else: for index in indices: nat_rules.append(self._assemble_nat_rule( 'dnat', dnat['dst'], dnat['translated'], vnic_index=index )) nat_rules.append(self._assemble_nat_rule( 'snat', dnat['translated'], dnat['dst'], vnic_index=index )) for snat in snats: vnic_index = None if 'vnic_index' in snat: vnic_index = snat['vnic_index'] if vnic_index or not indices: # we are adding a predefined index # or adding to all interfaces nat_rules.append(self._assemble_nat_rule( 'snat', snat['src'], snat['translated'], vnic_index=vnic_index )) else: for index in indices: nat_rules.append(self._assemble_nat_rule( 'snat', snat['src'], snat['translated'], vnic_index=index )) nat = { 'featureType': 'nat', 'rules': { 'natRulesDtos': nat_rules } } try: self.vcns.update_nat_config(edge_id, nat) return True except exceptions.VcnsApiException as e: LOG.exception("VCNS: Failed to create snat rule:\n%s", e.response) return False def update_routes(self, edge_id, gateway, routes): if gateway: gateway = 
gateway.split('/')[0] static_routes = [] for route in routes: if route.get('vnic_index') is None: static_routes.append({ "description": "", "vnic": constants.INTERNAL_VNIC_INDEX, "network": route['cidr'], "nextHop": route['nexthop'] }) else: static_routes.append({ "description": "", "vnic": route['vnic_index'], "network": route['cidr'], "nextHop": route['nexthop'] }) request = { "staticRoutes": { "staticRoutes": static_routes } } if gateway: request["defaultRoute"] = { "description": "default-gateway", "gatewayAddress": gateway } try: self.vcns.update_routes(edge_id, request) return True except exceptions.VcnsApiException as e: LOG.exception("VCNS: Failed to update routes:\n%s", e.response) return False def create_lswitch(self, name, tz_config, tags=None, port_isolation=False, replication_mode="service"): lsconfig = { 'display_name': utils.check_and_truncate(name), "tags": tags or [], "type": "LogicalSwitchConfig", "_schema": "/ws.v1/schema/LogicalSwitchConfig", "transport_zones": tz_config } if port_isolation is bool: lsconfig["port_isolation_enabled"] = port_isolation if replication_mode: lsconfig["replication_mode"] = replication_mode response = self.vcns.create_lswitch(lsconfig)[1] return response def delete_lswitch(self, lswitch_id): self.vcns.delete_lswitch(lswitch_id) def get_loadbalancer_config(self, edge_id): try: header, response = self.vcns.get_loadbalancer_config( edge_id) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to get service config") return response def enable_service_loadbalancer(self, edge_id): config = self.get_loadbalancer_config( edge_id) if not config['enabled']: config['enabled'] = True try: self.vcns.enable_service_loadbalancer(edge_id, config) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to enable loadbalancer " "service config") def _delete_port_group(self, task): try: self.vcns.delete_port_group( task.userdata['dvs_id'], 
task.userdata['port_group_id']) except Exception as e: LOG.error('Unable to delete %(pg)s exception %(ex)s', {'pg': task.userdata['port_group_id'], 'ex': e}) return task_constants.TaskStatus.ERROR return task_constants.TaskStatus.COMPLETED def _retry_task(self, task): delay = 0.5 max_retries = max(cfg.CONF.nsxv.retries, 1) args = task.userdata.get('args', []) kwargs = task.userdata.get('kwargs', {}) retry_number = task.userdata['retry_number'] retry_command = task.userdata['retry_command'] try: retry_command(*args, **kwargs) except Exception as exc: LOG.debug("Task %(name)s retry %(retry)s failed %(exc)s", {'name': task.name, 'exc': exc, 'retry': retry_number}) retry_number += 1 if retry_number > max_retries: with excutils.save_and_reraise_exception(): LOG.exception("Failed to %s", task.name) else: task.userdata['retry_number'] = retry_number # Sleep twice as long as the previous retry tts = (2 ** (retry_number - 1)) * delay time.sleep(min(tts, 60)) return task_constants.TaskStatus.PENDING LOG.info("Task %(name)s completed.", {'name': task.name}) return task_constants.TaskStatus.COMPLETED def delete_port_group(self, dvs_id, port_group_id): task_name = 'delete-port-group-%s-%s' % (port_group_id, dvs_id) userdata = {'retry_number': 1, 'retry_command': self.vcns.delete_port_group, 'args': [dvs_id, port_group_id]} task = tasks.Task(task_name, port_group_id, self._retry_task, status_callback=self._retry_task, userdata=userdata) self.task_manager.add(task) def delete_virtual_wire(self, vw_id): task_name = 'delete-virtualwire-%s' % vw_id userdata = {'retry_number': 1, 'retry_command': self.vcns.delete_virtual_wire, 'args': [vw_id]} task = tasks.Task(task_name, vw_id, self._retry_task, status_callback=self._retry_task, userdata=userdata) self.task_manager.add(task) def create_bridge(self, device_name, bridge): try: self.vcns.create_bridge(device_name, bridge) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to create 
bridge in the %s", device_name) def delete_bridge(self, device_name): try: self.vcns.delete_bridge(device_name) except exceptions.VcnsApiException: LOG.exception("Failed to delete bridge in the %s", device_name) def update_edge_ha(self, edge_id): ha_request = { 'featureType': "highavailability_4.0", 'enabled': True} self.vcns.enable_ha(edge_id, ha_request) def update_edge_syslog(self, edge_id, syslog_config, router_id): if 'server_ip' not in syslog_config: LOG.warning("Server IP missing in syslog config for %s", router_id) return protocol = syslog_config.get('protocol', 'tcp') if protocol not in ['tcp', 'udp']: LOG.warning("Invalid protocol in syslog config for %s", router_id) return loglevel = syslog_config.get('log_level') if loglevel and loglevel not in edge_utils.SUPPORTED_EDGE_LOG_LEVELS: LOG.warning("Invalid loglevel in syslog config for %s", router_id) return server_ip = syslog_config['server_ip'] request = {'featureType': 'syslog', 'protocol': protocol, 'serverAddresses': {'ipAddress': [server_ip], 'type': 'IpAddressesDto'}} # edge allows up to 2 syslog servers if 'server2_ip' in syslog_config: request['serverAddresses']['ipAddress'].append( syslog_config['server2_ip']) self.vcns.update_edge_syslog(edge_id, request) # update log level for routing in separate API call if loglevel: edge_utils.update_edge_loglevel(self.vcns, edge_id, 'routing', loglevel) vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/nsxv_loadbalancer.py0000666000175100017510000003142313244523345026444 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from vmware_nsx.plugins.nsx_v.vshield import nsxv_edge_cfg_obj LOG = logging.getLogger(__name__) class NsxvLoadbalancer(nsxv_edge_cfg_obj.NsxvEdgeCfgObj): SERVICE_NAME = 'loadbalancer' def __init__( self, enabled=True, enable_service_insertion=False, acceleration_enabled=False): super(NsxvLoadbalancer, self).__init__() self.payload = { 'enabled': enabled, 'enableServiceInsertion': enable_service_insertion, 'accelerationEnabled': acceleration_enabled} self.virtual_servers = {} def get_service_name(self): return self.SERVICE_NAME def add_virtual_server(self, virtual_server): self.virtual_servers[virtual_server.payload['name']] = virtual_server def del_virtual_server(self, name): self.virtual_servers.pop(name, None) def serializable_payload(self): virt_servers = [] app_profiles = [] app_rules = [] pools = [] monitors = [] virt_id = 1 app_prof_id = 1 app_rule_id = 1 pool_id = 1 monitor_id = 1 member_id = 1 for virtual_server in self.virtual_servers.values(): s_virt = virtual_server.payload.copy() s_virt['virtualServerId'] = 'virtualServer-%d' % virt_id virt_id += 1 # Setup app profile s_app_prof = virtual_server.app_profile.payload.copy() s_app_prof['applicationProfileId'] = ('applicationProfile-%d' % app_prof_id) app_profiles.append(s_app_prof) app_prof_id += 1 # Bind virtual server to app profile s_virt['applicationProfileId'] = s_app_prof['applicationProfileId'] # Setup app rules if virtual_server.app_rules.values(): s_virt['applicationRuleId'] = [] for app_rule in virtual_server.app_rules.values(): s_app_rule = 
app_rule.payload.copy() s_app_rule['applicationRuleId'] = ('applicationRule-%d' % app_rule_id) app_rule_id += 1 # Add to LB object, bind to virtual server app_rules.append(s_app_rule) s_virt['applicationRuleId'].append( s_app_rule['applicationRuleId']) # Setup pools s_pool = virtual_server.default_pool.payload.copy() s_pool['poolId'] = 'pool-%d' % pool_id pool_id += 1 pools.append(s_pool) # Add pool members s_pool['member'] = [] for member in virtual_server.default_pool.members.values(): s_m = member.payload.copy() s_m['memberId'] = 'member-%d' % member_id member_id += 1 s_pool['member'].append(s_m) # Bind pool to virtual server s_virt['defaultPoolId'] = s_pool['poolId'] s_pool['monitorId'] = [] # Add monitors for monitor in virtual_server.default_pool.monitors.values(): s_mon = monitor.payload.copy() s_mon['monitorId'] = 'monitor-%d' % monitor_id monitor_id += 1 s_pool['monitorId'].append(s_mon['monitorId']) monitors.append(s_mon) virt_servers.append(s_virt) payload = self.payload.copy() payload['applicationProfile'] = app_profiles if app_rules: payload['applicationRule'] = app_rules payload['monitor'] = monitors payload['pool'] = pools payload['virtualServer'] = virt_servers payload['featureType'] = 'loadbalancer_4.0' return payload @staticmethod def get_loadbalancer(vcns_obj, edge_id): edge_lb = nsxv_edge_cfg_obj.NsxvEdgeCfgObj.get_object( vcns_obj, edge_id, '%s/config' % NsxvLoadbalancer.SERVICE_NAME) lb_obj = NsxvLoadbalancer( edge_lb['enabled'], edge_lb['enableServiceInsertion'], edge_lb['accelerationEnabled']) # Construct loadbalancer objects for virt_srvr in edge_lb['virtualServer']: v_s = NsxvLBVirtualServer( virt_srvr['name'], virt_srvr['ipAddress'], virt_srvr['port'], virt_srvr['protocol'], virt_srvr['enabled'], virt_srvr['accelerationEnabled'], virt_srvr['connectionLimit']) # Find application profile objects, attach to virtual server for app_prof in edge_lb['applicationProfile']: if (virt_srvr['applicationProfileId'] == 
app_prof['applicationProfileId']): a_p = NsxvLBAppProfile( app_prof['name'], app_prof['serverSslEnabled'], app_prof['sslPassthrough'], app_prof['template'], app_prof['insertXForwardedFor']) if app_prof.get('persistence'): a_p.set_persistence( True, app_prof['persistence']['method'], app_prof['persistence'].get('cookieName'), app_prof['persistence'].get('cookieMode'), app_prof['persistence'].get('expire')) v_s.set_app_profile(a_p) # Find default pool, attach to virtual server for pool in edge_lb['pool']: if virt_srvr['defaultPoolId'] == pool['poolId']: p = NsxvLBPool( pool['name'], pool['algorithm'], pool['transparent']) # Add pool members to pool for member in pool['member']: m = NsxvLBPoolMember( member['name'], member['ipAddress'], member['port'], member['monitorPort'], member['condition'], member['weight'], member['minConn'], member['maxConn']) p.add_member(m) # Add monitors to pool for mon in edge_lb['monitor']: if mon['monitorId'] in pool['monitorId']: m = NsxvLBMonitor( mon['name'], mon['interval'], mon['maxRetries'], mon['method'], mon['timeout'], mon['type'], mon['url']) p.add_monitor(m) v_s.set_default_pool(p) # Add application rules to virtual server for rule in edge_lb['applicationRule']: if rule['applicationRuleId'] in virt_srvr['applicationRuleId']: r = NsxvLBAppRule( rule['name'], rule['script']) v_s.add_app_rule(r) lb_obj.add_virtual_server(v_s) return lb_obj class NsxvLBAppProfile(object): def __init__( self, name, server_ssl_enabled=False, ssl_pass_through=False, template='TCP', insert_xff=False, client_ssl_cert=None, persist=False, persist_method='cookie', persist_cookie_name='JSESSIONID', persist_cookie_mode='insert', persist_expire=30): self.payload = { 'name': name, 'serverSslEnabled': server_ssl_enabled, 'sslPassthrough': ssl_pass_through, 'template': template, 'insertXForwardedFor': insert_xff} if persist: self.payload['persistence'] = { 'method': persist_method, 'expire': persist_expire } if persist_cookie_mode == 'cookie': 
                self.payload['persistence']['cookieMode'] = persist_cookie_mode
                self.payload['persistence']['cookieName'] = persist_cookie_name

        if client_ssl_cert:
            # Client-side SSL termination with the given edge certificate;
            # client cert authentication is not enforced ('ignore').
            self.payload['clientSsl'] = {
                'clientAuth': 'ignore',
                'serviceCertificate': [client_ssl_cert]
            }

    def set_persistence(
            self,
            persist=False,
            persist_method='cookie',
            persist_cookie_name='JSESSIONID',
            persist_cookie_mode='insert',
            persist_expire=30):
        # Set or clear the session-persistence section of the payload.
        # persist=False removes any previously configured persistence.
        if persist:
            self.payload['persistence'] = {
                'method': persist_method,
                'expire': persist_expire
            }
            # NOTE(review): cookie attributes are only emitted when the
            # mode equals 'cookie', mirroring __init__ above.
            if persist_cookie_mode == 'cookie':
                self.payload['persistence']['cookieMode'] = persist_cookie_mode
                self.payload['persistence']['cookieName'] = persist_cookie_name
        else:
            self.payload.pop('persistence', None)


class NsxvLBAppRule(object):
    # Thin payload wrapper for an edge LB application rule (raw script).
    def __init__(self, name, script):
        self.payload = {
            'name': name,
            'script': script}


class NsxvLBVirtualServer(object):
    # Payload wrapper for an edge LB virtual server; holds references to
    # its app profile, app rules and default pool, which are serialized
    # together by NsxvLoadbalancer.serializable_payload().
    def __init__(
            self,
            name,
            ip_address,
            port=80,
            protocol='HTTP',
            enabled=True,
            acceleration_enabled=False,
            connection_limit=0,
            enable_service_insertion=False):
        self.payload = {
            'name': name,
            'ipAddress': ip_address,
            'port': port,
            'protocol': protocol,
            'enabled': enabled,
            'accelerationEnabled': acceleration_enabled,
            'connectionLimit': connection_limit,
            'enableServiceInsertion': enable_service_insertion}

        # Keyed by object name, matching add_*/del_* below.
        self.app_rules = {}
        self.app_profile = None
        self.default_pool = None

    def add_app_rule(self, app_rule):
        self.app_rules[app_rule.payload['name']] = app_rule

    def del_app_rule(self, name):
        # Removal is a no-op when the name is unknown.
        self.app_rules.pop(name, None)

    def set_default_pool(self, pool):
        self.default_pool = pool

    def set_app_profile(self, app_profile):
        self.app_profile = app_profile


class NsxvLBMonitor(object):
    # Payload wrapper for an edge LB health monitor.
    def __init__(
            self,
            name,
            interval=10,
            max_retries=3,
            method='GET',
            timeout=15,
            mon_type='http',
            url='/'):
        self.payload = {
            'name': name,
            'interval': interval,
            'maxRetries': max_retries,
            'method': method,
            'timeout': timeout,
            'type': mon_type,
            'url': url}


class NsxvLBPoolMember(object):
    # Payload wrapper for a single pool member (backend server).
    def __init__(
            self,
            name,
            ip_address,
            port,
            monitor_port=None,
            condition='enabled',
weight=1, min_conn=0, max_conn=0): self.payload = { 'name': name, 'ipAddress': ip_address, 'port': port, 'monitorPort': monitor_port, 'condition': condition, 'weight': weight, 'minConn': min_conn, 'maxConn': max_conn} class NsxvLBPool(object): def __init__( self, name, algorithm='round-robin', transparent=False): self.payload = { 'name': name, 'algorithm': algorithm, 'transparent': transparent} self.members = {} self.monitors = {} def add_member(self, member): self.members[member.payload['name']] = member def del_member(self, name): self.members.pop(name, None) def add_monitor(self, monitor): self.monitors[monitor.payload['name']] = monitor def del_monitor(self, name): self.monitors.pop(name, None) vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/edge_firewall_driver.py0000666000175100017510000004365013244523345027130 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NSX-v edge firewall driver: translates Neutron FWaaS-style rule dicts into
# the vShield Edge (VSE) firewall JSON schema and back.
from oslo_log import log as logging
from oslo_utils import excutils

from vmware_nsx._i18n import _
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.db import nsxv_db
from vmware_nsx.plugins.nsx_v.vshield.common import (
    exceptions as vcns_exc)

LOG = logging.getLogger(__name__)

# Action keywords as the vShield Edge (VSE) backend spells them.
VSE_FWAAS_ALLOW = "accept"
VSE_FWAAS_DENY = "deny"
VSE_FWAAS_REJECT = "reject"
# Action keywords as FWaaS/Neutron spells them.
FWAAS_ALLOW = "allow"
FWAAS_DENY = "deny"
FWAAS_REJECT = "reject"
# Name given to the implicitly-appended rule that permits traffic to the
# "external" vnic group (see _convert_firewall with allow_external=True).
FWAAS_ALLOW_EXT_RULE_NAME = 'Allow To External'


class EdgeFirewallDriver(object):
    """Implementation of driver APIs for
    Edge Firewall feature configuration
    """
    def __init__(self):
        super(EdgeFirewallDriver, self).__init__()
        # Lazily-populated cache of the backend application ids matching
        # ICMP echo; filled on first lookup and kept for the process
        # lifetime (a backend version change requires a neutron restart).
        self._icmp_echo_application_ids = None

    def _convert_firewall_action(self, action):
        # Map a FWaaS action keyword ("allow"/"deny"/"reject") to the VSE
        # equivalent ("accept"/"deny"/"reject").
        # Raises VcnsBadRequest for any unrecognized action value.
        if action == FWAAS_ALLOW:
            return VSE_FWAAS_ALLOW
        elif action == FWAAS_DENY:
            return VSE_FWAAS_DENY
        elif action == FWAAS_REJECT:
            return VSE_FWAAS_REJECT
        else:
            msg = _("Invalid action value %s in a firewall rule") % action
            raise vcns_exc.VcnsBadRequest(resource='firewall_rule',
                                          msg=msg)

    def _restore_firewall_action(self, action):
        # Inverse of _convert_firewall_action: map a VSE action keyword
        # back to the FWaaS spelling.
        # Raises VcnsBadRequest for any unrecognized action value.
        if action == VSE_FWAAS_ALLOW:
            return FWAAS_ALLOW
        elif action == VSE_FWAAS_DENY:
            return FWAAS_DENY
        elif action == VSE_FWAAS_REJECT:
            return FWAAS_REJECT
        else:
            msg = (_("Invalid action value %s in "
                     "a vshield firewall rule") % action)
            raise vcns_exc.VcnsBadRequest(resource='firewall_rule',
                                          msg=msg)

    def _get_port_range(self, min_port, max_port):
        # Render a (min, max) port pair as the string form used by FWaaS
        # rules: None for "any", "N" for a single port, "N:M" for a range.
        if not min_port or min_port == 'any':
            return None
        if min_port == max_port:
            return str(min_port)
        else:
            return '%d:%d' % (min_port, max_port)

    def _get_ports_list_from_string(self, port_str):
        """Receives a string representation of the service ports,
        and return a list of integers
        Supported formats:
        Empty string - no ports
        "number" - a single port
        "num1:num2" - a range
        "num1,num2,num3" - a list
        """
        if not port_str or port_str == 'any':
            return []
        if ':' in port_str:
            # Range form: the backend expects "min-max" rather than
            # the FWaaS "min:max" spelling.
            min_port, sep, max_port = port_str.partition(":")
            return ["%s-%s" % (int(min_port.strip()),
int(max_port.strip()))] if ',' in port_str: # remove duplications (using set) and empty/non numeric entries ports_set = set() for orig_port in port_str.split(','): port = orig_port.strip() if port and port.isdigit(): ports_set.add(int(port)) return sorted(list(ports_set)) else: return [int(port_str.strip())] def _convert_firewall_rule(self, rule, index=None): vcns_rule = { "action": self._convert_firewall_action(rule['action']), "enabled": rule.get('enabled', True)} if rule.get('name'): vcns_rule['name'] = rule['name'] if rule.get('description'): vcns_rule['description'] = rule['description'] if rule.get('source_ip_address'): vcns_rule['source'] = { "ipAddress": rule['source_ip_address'] } if rule.get('source_vnic_groups'): vcns_rule['source'] = { "vnicGroupId": rule['source_vnic_groups'] } if rule.get('destination_ip_address'): vcns_rule['destination'] = { "ipAddress": rule['destination_ip_address'] } if rule.get('destination_vnic_groups'): vcns_rule['destination'] = { "vnicGroupId": rule['destination_vnic_groups'] } if rule.get('application'): vcns_rule['application'] = rule['application'] service = {} if rule.get('source_port'): service['sourcePort'] = self._get_ports_list_from_string( rule['source_port']) if rule.get('destination_port'): service['port'] = self._get_ports_list_from_string( rule['destination_port']) if rule.get('protocol'): service['protocol'] = rule['protocol'] if rule['protocol'] == 'icmp': if rule.get('icmp_type'): service['icmpType'] = rule['icmp_type'] else: service['icmpType'] = 'any' if rule.get('ruleId'): vcns_rule['ruleId'] = rule.get('ruleId') if service: vcns_rule['application'] = { 'service': [service] } if rule.get('logged'): vcns_rule['loggingEnabled'] = rule['logged'] if index: vcns_rule['ruleTag'] = index return vcns_rule def _restore_firewall_rule(self, context, edge_id, rule): fw_rule = {} rule_binding = nsxv_db.get_nsxv_edge_firewallrule_binding_by_vseid( context.session, edge_id, rule['ruleId']) if rule_binding: fw_rule['id'] 
= rule_binding['rule_id'] fw_rule['ruleId'] = rule['ruleId'] if rule.get('source'): src = rule['source'] fw_rule['source_ip_address'] = src['ipAddress'] fw_rule['source_vnic_groups'] = src['vnicGroupId'] if rule.get('destination'): dest = rule['destination'] fw_rule['destination_ip_address'] = dest['ipAddress'] fw_rule['destination_vnic_groups'] = dest['vnicGroupId'] if 'application' in rule and 'service' in rule['application']: service = rule['application']['service'][0] fw_rule['protocol'] = service['protocol'] if service.get('sourcePort'): fw_rule['source_port'] = self._get_port_range( service['sourcePort'][0], service['sourcePort'][-1]) if service.get('destination_port'): fw_rule['destination_port'] = self._get_port_range( service['port'][0], service['port'][-1]) fw_rule['action'] = self._restore_firewall_action(rule['action']) fw_rule['enabled'] = rule['enabled'] if rule.get('name'): fw_rule['name'] = rule['name'] if rule.get('description'): fw_rule['description'] = rule['description'] if rule.get('loggingEnabled'): fw_rule['logged'] = rule['loggingEnabled'] return fw_rule def _convert_firewall(self, firewall, allow_external=False): ruleTag = 1 vcns_rules = [] for rule in firewall['firewall_rule_list']: tag = rule.get('ruleTag', ruleTag) vcns_rule = self._convert_firewall_rule(rule, tag) vcns_rules.append(vcns_rule) if not rule.get('ruleTag'): ruleTag += 1 if allow_external: # Add the allow-external rule with the latest tag vcns_rules.append({'name': FWAAS_ALLOW_EXT_RULE_NAME, 'action': "accept", 'enabled': True, 'destination': {'vnicGroupId': ["external"]}, 'ruleTag': ruleTag}) return { 'featureType': "firewall_4.0", 'globalConfig': {'tcpTimeoutEstablished': 7200}, 'firewallRules': { 'firewallRules': vcns_rules}} def _restore_firewall(self, context, edge_id, response): res = {} res['firewall_rule_list'] = [] for rule in response['firewallRules']['firewallRules']: if rule.get('ruleType') == 'default_policy': continue firewall_rule = 
self._restore_firewall_rule(context, edge_id, rule) res['firewall_rule_list'].append({'firewall_rule': firewall_rule}) return res def _get_firewall(self, edge_id): try: return self.vcns.get_firewall(edge_id)[1] except vcns_exc.VcnsApiException as e: LOG.exception("Failed to get firewall with edge " "id: %s", edge_id) raise e def _get_firewall_rule_next(self, context, edge_id, rule_vseid): # Return the firewall rule below 'rule_vseid' fw_cfg = self._get_firewall(edge_id) for i in range(len(fw_cfg['firewallRules']['firewallRules'])): rule_cur = fw_cfg['firewallRules']['firewallRules'][i] if str(rule_cur['ruleId']) == rule_vseid: if (i + 1) == len(fw_cfg['firewallRules']['firewallRules']): return None else: return fw_cfg['firewallRules']['firewallRules'][i + 1] def get_firewall_rule(self, context, id, edge_id): rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding( context.session, id, edge_id) if rule_map is None: msg = _("No rule id:%s found in the edge_firewall_binding") % id LOG.error(msg) raise vcns_exc.VcnsNotFound( resource='vcns_firewall_rule_bindings', msg=msg) vcns_rule_id = rule_map.rule_vseid try: response = self.vcns.get_firewall_rule( edge_id, vcns_rule_id)[1] except vcns_exc.VcnsApiException as e: LOG.exception("Failed to get firewall rule: %(rule_id)s " "with edge_id: %(edge_id)s", { 'rule_id': id, 'edge_id': edge_id}) raise e return self._restore_firewall_rule(context, edge_id, response) def get_firewall(self, context, edge_id): response = self._get_firewall(edge_id) return self._restore_firewall(context, edge_id, response) def delete_firewall(self, context, edge_id): try: self.vcns.delete_firewall(edge_id) except vcns_exc.VcnsApiException as e: LOG.exception("Failed to delete firewall " "with edge_id:%s", edge_id) raise e nsxv_db.cleanup_nsxv_edge_firewallrule_binding( context.session, edge_id) def update_firewall_rule(self, context, id, edge_id, firewall_rule): rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding( context.session, id, edge_id) 
        # Resolve the Neutron rule id to the backend (VSE) rule id via the
        # DB binding that was stored when the rule was first pushed.
        vcns_rule_id = rule_map.rule_vseid
        fwr_req = self._convert_firewall_rule(firewall_rule)
        try:
            self.vcns.update_firewall_rule(edge_id, vcns_rule_id, fwr_req)
        except vcns_exc.VcnsApiException:
            # Log and re-raise so the caller still sees the backend failure.
            with excutils.save_and_reraise_exception():
                LOG.exception("Failed to update firewall rule: "
                              "%(rule_id)s "
                              "with edge_id: %(edge_id)s",
                              {'rule_id': id,
                               'edge_id': edge_id})

    def delete_firewall_rule(self, context, id, edge_id):
        # Delete one firewall rule from the given edge, then remove the
        # Neutron-id <-> VSE-id binding from the DB.  The binding is only
        # deleted after the backend call succeeds.
        rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
            context.session, id, edge_id)
        vcns_rule_id = rule_map.rule_vseid
        try:
            self.vcns.delete_firewall_rule(edge_id, vcns_rule_id)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception("Failed to delete firewall rule: "
                              "%(rule_id)s "
                              "with edge_id: %(edge_id)s",
                              {'rule_id': id,
                               'edge_id': edge_id})
        nsxv_db.delete_nsxv_edge_firewallrule_binding(
            context.session, id)

    def _add_rule_above(self, context, ref_rule_id, edge_id, firewall_rule):
        # Insert firewall_rule directly above the rule identified by the
        # Neutron id ref_rule_id, and record the new rule's id binding.
        rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
            context.session, ref_rule_id, edge_id)
        ref_vcns_rule_id = rule_map.rule_vseid
        fwr_req = self._convert_firewall_rule(firewall_rule)
        try:
            header = self.vcns.add_firewall_rule_above(
                edge_id, ref_vcns_rule_id, fwr_req)[0]
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception("Failed to add firewall rule above: "
                              "%(rule_id)s with edge_id: %(edge_id)s",
                              {'rule_id': ref_vcns_rule_id,
                               'edge_id': edge_id})
        # The backend returns the URI of the newly-created rule in the
        # 'location' header; its trailing path segment is the VSE rule id.
        objuri = header['location']
        fwr_vseid = objuri[objuri.rfind("/") + 1:]
        map_info = {
            'rule_id': firewall_rule['id'],
            'rule_vseid': fwr_vseid,
            'edge_id': edge_id}
        nsxv_db.add_nsxv_edge_firewallrule_binding(
            context.session, map_info)

    def _add_rule_below(self, context, ref_rule_id, edge_id, firewall_rule):
        # Insert firewall_rule directly below the reference rule: find the
        # rule following the reference and insert above it, or append at
        # the bottom when the reference rule is the last one.
        rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
            context.session, ref_rule_id, edge_id)
        ref_vcns_rule_id = rule_map.rule_vseid
        fwr_vse_next = self._get_firewall_rule_next(
            context, edge_id, ref_vcns_rule_id)
        fwr_req =
self._convert_firewall_rule(firewall_rule) if fwr_vse_next: ref_vcns_rule_id = fwr_vse_next['ruleId'] try: header = self.vcns.add_firewall_rule_above( edge_id, int(ref_vcns_rule_id), fwr_req)[0] except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to add firewall rule above: " "%(rule_id)s with edge_id: %(edge_id)s", {'rule_id': ref_vcns_rule_id, 'edge_id': edge_id}) else: # append the rule at the bottom try: header = self.vcns.add_firewall_rule( edge_id, fwr_req)[0] except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to append a firewall rule" "with edge_id: %s", edge_id) objuri = header['location'] fwr_vseid = objuri[objuri.rfind("/") + 1:] map_info = { 'rule_id': firewall_rule['id'], 'rule_vseid': fwr_vseid, 'edge_id': edge_id } nsxv_db.add_nsxv_edge_firewallrule_binding( context.session, map_info) def insert_rule(self, context, rule_info, edge_id, fwr): if rule_info.get('insert_before'): self._add_rule_above( context, rule_info['insert_before'], edge_id, fwr) elif rule_info.get('insert_after'): self._add_rule_below( context, rule_info['insert_after'], edge_id, fwr) else: msg = _("Can't execute insert rule operation " "without reference rule_id") raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg) def update_firewall(self, edge_id, firewall, context, allow_external=True): config = self._convert_firewall(firewall, allow_external=allow_external) try: self.vcns.update_firewall(edge_id, config) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to update firewall " "with edge_id: %s", edge_id) vcns_fw_config = self._get_firewall(edge_id) nsxv_db.cleanup_nsxv_edge_firewallrule_binding( context.session, edge_id) self._create_rule_id_mapping( context, edge_id, firewall, vcns_fw_config) def _create_rule_id_mapping( self, context, edge_id, firewall, vcns_fw): for rule in vcns_fw['firewallRules']['firewallRules']: if 
rule.get('ruleTag'): index = rule['ruleTag'] - 1 # TODO(linb):a simple filter of the retrieved rules which may # be created by other operations unintentionally if index < len(firewall['firewall_rule_list']): rule_vseid = rule['ruleId'] rule_id = firewall['firewall_rule_list'][index].get('id') if rule_id: map_info = { 'rule_id': rule_id, 'rule_vseid': rule_vseid, 'edge_id': edge_id } nsxv_db.add_nsxv_edge_firewallrule_binding( context.session, map_info) def get_icmp_echo_application_ids(self): # check cached list first # (if backend version changes, neutron should be restarted) if self._icmp_echo_application_ids: return self._icmp_echo_application_ids self._icmp_echo_application_ids = self.get_application_ids( ['ICMP Echo', 'IPv6-ICMP Echo']) if not self._icmp_echo_application_ids: raise nsx_exc.NsxResourceNotFound( res_name='ICMP Echo', res_id='') return self._icmp_echo_application_ids def get_application_ids(self, application_names): results = self.vcns.list_applications() application_ids = [] for result in results: for name in application_names: if result['name'] == name: application_ids.append(result['objectId']) return application_ids vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/tasks/0000775000175100017510000000000013244524600023520 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/tasks/tasks.py0000666000175100017510000003104613244523345025232 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import uuid from eventlet import event from eventlet import greenthread from neutron_lib import exceptions from oslo_log import log as logging from oslo_service import loopingcall import six from vmware_nsx._i18n import _ from vmware_nsx.plugins.nsx_v.vshield.tasks import constants DEFAULT_INTERVAL = 1000 LOG = logging.getLogger(__name__) def nop(task): return constants.TaskStatus.COMPLETED class TaskException(exceptions.NeutronException): def __init__(self, message=None, **kwargs): if message is not None: self.message = message super(TaskException, self).__init__(**kwargs) class InvalidState(TaskException): message = _("Invalid state %(state)d") class TaskStateSkipped(TaskException): message = _("State %(state)d skipped. Current state %(current)d") class Task(object): def __init__(self, name, resource_id, execute_callback, status_callback=nop, result_callback=nop, userdata=None): self.name = name self.resource_id = resource_id self._execute_callback = execute_callback self._status_callback = status_callback self._result_callback = result_callback self.userdata = userdata self.id = None self.status = None self._monitors = { constants.TaskState.START: [], constants.TaskState.EXECUTED: [], constants.TaskState.RESULT: [] } self._states = [None, None, None, None] self._state = constants.TaskState.NONE def _add_monitor(self, action, func): self._monitors[action].append(func) return self def _move_state(self, state): self._state = state if self._states[state] is not None: e = self._states[state] self._states[state] = None e.send() for s in range(state): if self._states[s] is not None: e = self._states[s] self._states[s] = None e.send_exception( TaskStateSkipped(state=s, current=self._state)) def _invoke_monitor(self, state): for func in self._monitors[state]: try: func(self) except Exception: LOG.exception("Task %(task)s encountered 
exception in " "%(func)s at state %(state)s", {'task': str(self), 'func': str(func), 'state': state}) self._move_state(state) return self def _start(self): return self._invoke_monitor(constants.TaskState.START) def _executed(self): return self._invoke_monitor(constants.TaskState.EXECUTED) def _update_status(self, status): if self.status == status: return self self.status = status def _finished(self): return self._invoke_monitor(constants.TaskState.RESULT) def add_start_monitor(self, func): return self._add_monitor(constants.TaskState.START, func) def add_executed_monitor(self, func): return self._add_monitor(constants.TaskState.EXECUTED, func) def add_result_monitor(self, func): return self._add_monitor(constants.TaskState.RESULT, func) def wait(self, state): if (state < constants.TaskState.START or state > constants.TaskState.RESULT or state == constants.TaskState.STATUS): raise InvalidState(state=state) if state <= self._state: # we already passed this current state, so no wait return e = event.Event() self._states[state] = e e.wait() def __repr__(self): return "Task-%s-%s-%s" % ( self.name.encode('ascii', 'ignore'), self.resource_id.encode('ascii', 'ignore'), self.id) class TaskManager(object): _instance = None _default_interval = DEFAULT_INTERVAL def __init__(self, interval=None): self._interval = interval or TaskManager._default_interval # A queue to pass tasks from other threads self._tasks_queue = collections.deque() # A dict to store resource -> resource's tasks self._tasks = {} # Current task being executed in main thread self._main_thread_exec_task = None # New request event self._req = event.Event() # TaskHandler stopped event self._stopped = False # Periodic function trigger self._monitor = None self._monitor_busy = False # Thread handling the task request self._thread = None def _execute(self, task): """Execute task.""" LOG.debug("Start task %s", str(task)) task._start() try: status = task._execute_callback(task) except Exception: LOG.exception("Task 
%(task)s encountered exception in " "%(cb)s", {'task': str(task), 'cb': str(task._execute_callback)}) status = constants.TaskStatus.ERROR LOG.debug("Task %(task)s return %(status)s", {'task': str(task), 'status': status}) task._update_status(status) task._executed() return status def _result(self, task): """Notify task execution result.""" try: task._result_callback(task) except Exception: LOG.exception("Task %(task)s encountered exception in " "%(cb)s", {'task': str(task), 'cb': str(task._result_callback)}) LOG.debug("Task %(task)s return %(status)s", {'task': str(task), 'status': task.status}) task._finished() def _check_pending_tasks(self): """Check all pending tasks status.""" for resource_id in self._tasks.keys(): if self._stopped: # Task manager is stopped, return now return tasks = self._tasks[resource_id] # only the first task is executed and pending task = tasks[0] try: status = task._status_callback(task) except Exception: LOG.exception("Task %(task)s encountered exception in " "%(cb)s", {'task': str(task), 'cb': str(task._status_callback)}) status = constants.TaskStatus.ERROR task._update_status(status) if status != constants.TaskStatus.PENDING: self._dequeue(task, True) def _enqueue(self, task): if task.resource_id in self._tasks: # append to existing resource queue for ordered processing self._tasks[task.resource_id].append(task) else: # put the task to a new resource queue tasks = collections.deque() tasks.append(task) self._tasks[task.resource_id] = tasks def _dequeue(self, task, run_next): self._result(task) tasks = self._tasks[task.resource_id] tasks.remove(task) if not tasks: # no more tasks for this resource del self._tasks[task.resource_id] return if run_next: # process next task for this resource while tasks: task = tasks[0] status = self._execute(task) if status == constants.TaskStatus.PENDING: break self._dequeue(task, False) def _abort(self): """Abort all tasks.""" # put all tasks haven't been received by main thread to queue # so the 
following abort handling can cover them for t in self._tasks_queue: self._enqueue(t) self._tasks_queue.clear() resources = copy.deepcopy(self._tasks) for resource_id in resources.keys(): tasks = list(self._tasks[resource_id]) for task in tasks: task._update_status(constants.TaskStatus.ABORT) self._dequeue(task, False) def _get_task(self): """Get task request.""" while True: for t in self._tasks_queue: return self._tasks_queue.popleft() self._req.wait() self._req.reset() def run(self): while True: try: if self._stopped: # Gracefully terminate this thread if the _stopped # attribute was set to true LOG.info("Stopping TaskManager") break # get a task from queue, or timeout for periodic status check task = self._get_task() if task.resource_id in self._tasks: # this resource already has some tasks under processing, # append the task to same queue for ordered processing self._enqueue(task) continue try: self._main_thread_exec_task = task self._execute(task) finally: self._main_thread_exec_task = None if task.status is None: # The thread is killed during _execute(). To guarantee # the task been aborted correctly, put it to the queue. 
self._enqueue(task) elif task.status != constants.TaskStatus.PENDING: self._result(task) else: self._enqueue(task) except Exception: LOG.exception("TaskManager terminating because " "of an exception") break def add(self, task): task.id = uuid.uuid1() self._tasks_queue.append(task) if not self._req.ready(): self._req.send() return task.id def stop(self): if self._thread is None: return self._stopped = True self._thread.kill() self._thread = None # Stop looping call and abort running tasks self._monitor.stop() if self._monitor_busy: self._monitor.wait() self._abort() LOG.info("TaskManager terminated") def has_pending_task(self): if self._tasks_queue or self._tasks or self._main_thread_exec_task: return True else: return False def show_pending_tasks(self): for task in self._tasks_queue: LOG.info(str(task)) for resource, tasks in six.iteritems(self._tasks): for task in tasks: LOG.info(str(task)) if self._main_thread_exec_task: LOG.info(str(self._main_thread_exec_task)) def count(self): count = 0 for resource_id, tasks in six.iteritems(self._tasks): count += len(tasks) return count def start(self, interval=None): def _inner(): self.run() def _loopingcall_callback(): self._monitor_busy = True try: self._check_pending_tasks() except Exception: LOG.exception("Exception in _check_pending_tasks") self._monitor_busy = False if self._thread is not None: return self if interval is None or interval == 0: interval = self._interval self._stopped = False self._thread = greenthread.spawn(_inner) self._monitor = loopingcall.FixedIntervalLoopingCall( _loopingcall_callback) self._monitor.start(interval / 1000.0, interval / 1000.0) # To allow the created thread start running greenthread.sleep(0) return self @classmethod def set_default_interval(cls, interval): cls._default_interval = interval vmware-nsx-12.0.1/vmware_nsx/plugins/nsx_v/vshield/tasks/__init__.py0000666000175100017510000000000013244523345025626 0ustar 
# Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class TaskStatus(object):
    """Status reported for a task by its callbacks.

    The execution/status callback functions use these values to tell the
    task manager how a task is doing, and the same values indicate the
    final result of the task's execution.
    """

    # Task has not finished yet; the manager keeps polling its status.
    PENDING = 1
    # Task finished successfully.
    COMPLETED = 2
    # Task finished with an error.
    ERROR = 3
    # Task was aborted before it could complete.
    ABORT = 4


class TaskState(object):
    """Progress of a task through the task manager's pipeline.

    NONE:     the task is still waiting in the queue
    START:    the task was pulled from the queue and is about to execute
    EXECUTED: the task's execute callback has run
    STATUS:   periodic status checks are running for this task
    RESULT:   the task has finished and its result is available
    """

    NONE = -1
    START = 0
    EXECUTED = 1
    STATUS = 2
    RESULT = 3
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

from oslo_serialization import jsonutils
import six

from vmware_nsx.plugins.nsx_v.vshield import vcns


@six.add_metaclass(abc.ABCMeta)
class NsxvEdgeCfgObj(object):
    """Abstract base for NSX-v edge service configuration objects.

    A subclass names the edge service it configures and produces a
    JSON-serializable payload for it; this base class performs the REST
    exchange with the vCNS backend for both reads and writes.
    """

    def __init__(self):
        return

    @abc.abstractmethod
    def get_service_name(self):
        """Return the service-name path segment used in backend URIs."""
        return

    @abc.abstractmethod
    def serializable_payload(self):
        """Return this object's configuration as a JSON-serializable value."""
        return

    @staticmethod
    def get_object(vcns_obj, edge_id, service_name):
        """Fetch and return the decoded config of a service on an edge.

        :param vcns_obj: vCNS client used to issue the request
        :param edge_id: identifier of the target edge appliance
        :param service_name: service path segment to read
        """
        request_uri = "%s/%s/%s" % (vcns.URI_PREFIX, edge_id, service_name)
        _headers, body = vcns_obj.do_request(
            vcns.HTTP_GET,
            request_uri,
            decode=True)
        return body

    def submit_to_backend(self, vcns_obj, edge_id):
        """PUT this object's serialized configuration to the backend edge.

        Returns the backend response, or None when the serialized payload
        is empty (nothing is sent in that case).
        """
        request_uri = "%s/%s/%s/config" % (vcns.URI_PREFIX,
                                           edge_id,
                                           self.get_service_name())
        serialized = jsonutils.dumps(self.serializable_payload(),
                                     sort_keys=True)
        if not serialized:
            # Empty payload: skip the request, implicitly returning None,
            # exactly as before.
            return
        return vcns_obj.do_request(
            vcns.HTTP_PUT,
            request_uri,
            serialized,
            format='json',
            encode=False)
# See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib import constants
from oslo_log import log
from sqlalchemy.orm import exc as sa_exc

from vmware_nsx.common import locking
from vmware_nsx.common import nsxv_constants
from vmware_nsx.db import nsxv_db
from vmware_nsx.extensions import projectpluginmap
from vmware_nsx.plugins.common.housekeeper import base_job
from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az
from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_const

LOG = log.getLogger(__name__)


class ErrorBackupEdgeJob(base_job.BaseJob):
    """Housekeeping job that revalidates backup Edges in ERROR state.

    For every backup-edge router binding found in ERROR state, the job
    tries to redeploy the edge via the backend; if that fails, it falls
    back to deleting the edge and its binding.
    """

    def __init__(self, readonly):
        super(ErrorBackupEdgeJob, self).__init__(readonly)
        self.azs = nsx_az.NsxVAvailabilityZones()

    def get_project_plugin(self, plugin):
        """Return the NSX-V core plugin this job operates against."""
        return plugin.get_plugin_by_type(projectpluginmap.NsxPlugins.NSX_V)

    def get_name(self):
        """Job identifier used by the housekeeper registry."""
        return 'error_backup_edge'

    def get_description(self):
        """Human-readable description of what this job does."""
        return 'revalidate backup Edge appliances in ERROR state'

    def run(self, context):
        """Find broken backup edges and (when not readonly) recover them."""
        super(ErrorBackupEdgeJob, self).run(context)

        # Gather ERROR state backup edges into dict.
        # Backup edges are identified by the BACKUP_ROUTER_PREFIX on their
        # router_id; the pool lock keeps the snapshot consistent with
        # concurrent backup-pool operations.
        filters = {'status': [constants.ERROR]}
        like_filters = {'router_id': vcns_const.BACKUP_ROUTER_PREFIX + "%"}
        with locking.LockManager.get_lock('nsx-edge-backup-pool'):
            error_edge_bindings = nsxv_db.get_nsxv_router_bindings(
                context.session, filters=filters, like_filters=like_filters)

        if not error_edge_bindings:
            LOG.debug('Housekeeping: no backup edges in ERROR state detected')
            return

        # Keep list of current broken backup edges - as it may change while
        # HK is running
        for binding in error_edge_bindings:
            LOG.warning('Housekeeping: Backup Edge appliance %s is in ERROR'
                        ' state', binding['edge_id'])
            if not self.readonly:
                # Per-edge lock so we never race another operation on the
                # same appliance.
                with locking.LockManager.get_lock(binding['edge_id']):
                    self._handle_backup_edge(context, binding)

    def _handle_backup_edge(self, context, binding):
        """Try to redeploy a broken backup edge; delete it on failure."""
        dist = (binding['edge_type'] == nsxv_constants.VDR_EDGE)
        az = self.azs.get_availability_zone(
            binding['availability_zone'])
        try:
            update_result = self.plugin.nsx_v.update_edge(
                context, binding['router_id'], binding['edge_id'],
                binding['router_id'], None,
                appliance_size=binding['appliance_size'],
                dist=dist, availability_zone=az)

            if update_result:
                # Redeploy succeeded - mark the binding healthy again.
                nsxv_db.update_nsxv_router_binding(
                    context.session, binding['router_id'],
                    status=constants.ACTIVE)
        except Exception as e:
            LOG.error('Housekeeping: failed to recover Edge '
                      'appliance %s with exception %s',
                      binding['edge_id'], e)
            update_result = False

        if not update_result:
            LOG.warning('Housekeeping: failed to recover Edge '
                        'appliance %s, trying to delete',
                        binding['edge_id'])
            self._delete_edge(context, binding, dist)

    def _delete_edge(self, context, binding, dist):
        """Best-effort delete of an unrecoverable backup edge.

        Both the DB status update and the backend delete are allowed to
        fail without aborting the job (errors are logged only).
        """
        try:
            nsxv_db.update_nsxv_router_binding(
                context.session, binding['router_id'],
                status=constants.PENDING_DELETE)
        except sa_exc.NoResultFound:
            LOG.debug("Housekeeping: Router binding %s does not exist.",
                      binding['router_id'])

        try:
            self.plugin.nsx_v.delete_edge(context, binding['router_id'],
                                          binding['edge_id'], dist=dist)
        except Exception as e:
            LOG.warning('Housekeeping: Failed to delete edge %s with '
                        'exception %s', binding['edge_id'], e)
# See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib import constants
from oslo_log import log
from oslo_utils import uuidutils

from vmware_nsx.common import locking
from vmware_nsx.db import nsxv_db
from vmware_nsx.extensions import projectpluginmap
from vmware_nsx.plugins.common.housekeeper import base_job
from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_const

LOG = log.getLogger(__name__)


class ErrorDhcpEdgeJob(base_job.BaseJob):
    """Housekeeping job that revalidates DHCP Edges in ERROR state.

    Reconciles three layers for each broken DHCP edge: Neutron networks,
    the nsxv DB binding tables, and the actual interfaces on the backend
    appliance. In readonly mode only warnings are logged.
    """

    def get_project_plugin(self, plugin):
        """Return the NSX-V core plugin this job operates against."""
        return plugin.get_plugin_by_type(projectpluginmap.NsxPlugins.NSX_V)

    def get_name(self):
        """Job identifier used by the housekeeper registry."""
        return 'error_dhcp_edge'

    def get_description(self):
        """Human-readable description of what this job does."""
        return 'revalidate DHCP Edge appliances in ERROR state'

    def run(self, context):
        """Collect DHCP edges in ERROR state and validate each one."""
        super(ErrorDhcpEdgeJob, self).run(context)

        # Gather ERROR state DHCP edges into dict
        filters = {'status': [constants.ERROR]}
        error_edge_bindings = nsxv_db.get_nsxv_router_bindings(
            context.session, filters=filters)

        if not error_edge_bindings:
            LOG.debug('Housekeeping: no DHCP edges in ERROR state detected')
            return

        with locking.LockManager.get_lock('nsx-dhcp-edge-pool'):
            # Group bindings per edge; DHCP bindings are recognized by the
            # DHCP_EDGE_PREFIX on their router_id.
            edge_dict = {}
            for binding in error_edge_bindings:
                if binding['router_id'].startswith(
                        vcns_const.DHCP_EDGE_PREFIX):
                    bind_list = edge_dict.get(binding['edge_id'],
                                              [])
                    bind_list.append(binding)
                    edge_dict[binding['edge_id']] = bind_list

            # Get valid neutron networks and create a prefix dict.
            # The router_id embeds a truncated network id after the prefix,
            # so the prefix dict maps that truncation back to the full id.
            networks = [net['id'] for net in
                        self.plugin.get_networks(context, fields=['id'])]
            pfx_dict = {net[:36 - len(vcns_const.DHCP_EDGE_PREFIX)]: net
                        for net in networks}

            for edge_id in edge_dict.keys():
                try:
                    self._validate_dhcp_edge(
                        context, edge_dict, pfx_dict, networks, edge_id)
                except Exception as e:
                    # One broken edge must not stop the rest of the sweep.
                    LOG.error('Failed to recover DHCP Edge %s (%s)',
                              edge_id, e)

    def _validate_dhcp_edge(
            self, context, edge_dict, pfx_dict, networks, edge_id):
        """Run the three reconciliation steps (A, B, C) for one edge."""
        # Also metadata network should be a valid network for the edge
        az_name = self.plugin.get_availability_zone_name_by_edge(context,
                                                                 edge_id)
        with locking.LockManager.get_lock(edge_id):
            vnic_binds = nsxv_db.get_edge_vnic_bindings_by_edge(
                context.session, edge_id)
            edge_networks = [bind['network_id'] for bind in vnic_binds]

            # Step (A)
            # Find router bindings which are mapped to dead networks, or
            # do not have interfaces registered in nsxv tables
            for binding in edge_dict[edge_id]:
                router_id = binding['router_id']

                net_pfx = router_id[len(vcns_const.DHCP_EDGE_PREFIX):]
                net_id = pfx_dict.get(net_pfx)

                if net_id is None:
                    # Delete router binding as we do not have such network
                    # in Neutron
                    LOG.warning('Housekeeping: router binding %s for edge '
                                '%s has no matching neutron network',
                                router_id, edge_id)

                    if not self.readonly:
                        nsxv_db.delete_nsxv_router_binding(
                            context.session, binding['router_id'])
                else:
                    if net_id not in edge_networks:
                        # Create vNic bind here
                        LOG.warning('Housekeeping: edge %s vnic binding '
                                    'missing for network %s', edge_id,
                                    net_id)

                        if not self.readonly:
                            nsxv_db.allocate_edge_vnic_with_tunnel_index(
                                context.session, edge_id, net_id, az_name)

            # Step (B)
            # Find vNic bindings which reference invalid networks or aren't
            # bound to any router binding

            # Reread vNic binds as we might created more or deleted some in
            # step (A)
            vnic_binds = nsxv_db.get_edge_vnic_bindings_by_edge(
                context.session, edge_id)

            for bind in vnic_binds:
                if bind['network_id'] not in networks:
                    LOG.warning('Housekeeping: edge vnic binding for edge '
                                '%s is for invalid network id %s',
                                edge_id, bind['network_id'])

                    if not self.readonly:
                        nsxv_db.free_edge_vnic_by_network(
                            context.session, edge_id, bind['network_id'])

            # Step (C)
            # Verify that backend is in sync with Neutron

            # Reread vNic binds as we might deleted some in step (B)
            vnic_binds = nsxv_db.get_edge_vnic_bindings_by_edge(
                context.session, edge_id)

            # Transform to network-keyed dict
            vnic_dict = {vnic['network_id']: {
                'vnic_index': vnic['vnic_index'],
                'tunnel_index': vnic['tunnel_index']
            } for vnic in vnic_binds}

            backend_vnics = self.plugin.nsx_v.vcns.get_interfaces(
                edge_id)[1].get('vnics', [])
            # if_changed[vnic index] -> True when the in-memory vnic was
            # modified and must be pushed back to the backend below.
            if_changed = {}

            self._validate_edge_subinterfaces(
                context, edge_id, backend_vnics, vnic_dict, if_changed)
            self._add_missing_subinterfaces(
                context, edge_id, vnic_binds, backend_vnics, if_changed)

            if not self.readonly:
                for vnic in backend_vnics:
                    if if_changed[vnic['index']]:
                        self.plugin.nsx_v.vcns.update_interface(edge_id,
                                                                vnic)

                self._update_router_bindings(context, edge_id)

    def _validate_edge_subinterfaces(self, context, edge_id, backend_vnics,
                                     vnic_dict, if_changed):
        """Drop backend subinterfaces that have no matching DB binding."""
        # Validate that all the interfaces on the Edge
        # appliance are registered in nsxv_edge_vnic_bindings
        for vnic in backend_vnics:
            if_changed[vnic['index']] = False
            if (vnic['isConnected'] and vnic['type'] == 'trunk'
                    and vnic['subInterfaces']):
                for sub_if in vnic['subInterfaces']['subInterfaces']:
                    # Subinterface name field contains the net id
                    vnic_bind = vnic_dict.get(sub_if['logicalSwitchName'])
                    if (vnic_bind
                            and vnic_bind['vnic_index'] == vnic['index']
                            and vnic_bind['tunnel_index'] ==
                            sub_if['tunnelId']):
                        # Subinterface matches its DB binding - keep it.
                        pass
                    else:
                        LOG.warning('Housekeeping: subinterface %s for vnic '
                                    '%s on edge %s is not defined in '
                                    'nsxv_edge_vnic_bindings',
                                    sub_if['tunnelId'], vnic['index'],
                                    edge_id)
                        if_changed[vnic['index']] = True
                        # NOTE(review): removing from the list while
                        # iterating it - presumably acceptable here since a
                        # changed vnic is re-pushed wholesale; confirm.
                        vnic['subInterfaces']['subInterfaces'].remove(sub_if)

    def _add_missing_subinterfaces(self, context, edge_id, vnic_binds,
                                   backend_vnics, if_changed):
        """Recreate subinterfaces that the DB expects but the edge lacks."""
        # Verify that all the entries in
        # nsxv_edge_vnic_bindings are attached on the Edge

        # Arrange the vnic binds in a list of lists - vnics and subinterfaces
        # Internal metadata networks are intentionally skipped.
        metadata_nets = [
            net['network_id'] for net in
            nsxv_db.get_nsxv_internal_networks(
                context.session,
                vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE)]

        for vnic_bind in vnic_binds:
            if vnic_bind['network_id'] in metadata_nets:
                continue

            for vnic in backend_vnics:
                if vnic['index'] == vnic_bind['vnic_index']:
                    found = False
                    tunnel_index = vnic_bind['tunnel_index']
                    network_id = vnic_bind['network_id']
                    for sub_if in (vnic.get('subInterfaces', {}).get(
                            'subInterfaces', [])):
                        if sub_if['tunnelId'] == tunnel_index:
                            found = True
                            if sub_if.get('logicalSwitchName') != network_id:
                                # Tunnel exists but points at the wrong
                                # network - rebuild it.
                                LOG.warning('Housekeeping: subinterface %s on '
                                            'vnic %s on edge %s should be '
                                            'connected to network %s',
                                            tunnel_index, vnic['index'],
                                            edge_id, network_id)
                                if_changed[vnic['index']] = True
                                if not self.readonly:
                                    self._recreate_vnic_subinterface(
                                        context, network_id, edge_id, vnic,
                                        tunnel_index)
                                    sub_if['name'] = network_id
                    if not found:
                        LOG.warning('Housekeeping: subinterface %s on vnic '
                                    '%s on edge %s should be connected to '
                                    'network %s but is missing', tunnel_index,
                                    vnic['index'], edge_id, network_id)
                        if_changed[vnic['index']] = True
                        if not self.readonly:
                            self._recreate_vnic_subinterface(
                                context, network_id, edge_id, vnic,
                                tunnel_index)

    def _recreate_vnic_subinterface(
            self, context, network_id, edge_id, vnic, tunnel_index):
        """Build a subinterface for network_id and attach it to the vnic.

        Mutates the in-memory ``vnic`` dict only; the caller is
        responsible for pushing the updated vnic to the backend.
        """
        vnic_index = vnic['index']
        network_name_item = [edge_id, str(vnic_index), str(tunnel_index)]
        # Name is truncated to 36 chars, matching the id-length convention.
        network_name = ('-'.join(network_name_item) +
                        uuidutils.generate_uuid())[:36]
        port_group_id = vnic.get('portgroupId')

        address_groups = self.plugin._create_network_dhcp_address_group(
            context, network_id)
        port_group_id, iface = self.plugin.edge_manager._create_sub_interface(
            context, network_id, network_name, tunnel_index,
            address_groups, port_group_id)

        if not vnic.get('subInterfaces'):
            vnic['subInterfaces'] = {'subInterfaces': []}

        vnic['subInterfaces']['subInterfaces'].append(iface)

        if vnic['type'] != 'trunk':
            # reinitialize the interface as it is missing config
            vnic['name'] = (vcns_const.INTERNAL_VNIC_NAME +
                            str(vnic['index']))
            vnic['type'] = 'trunk'
            vnic['portgroupId'] = port_group_id
            vnic['mtu'] = 1500
            vnic['enableProxyArp'] = False
            vnic['enableSendRedirects'] = True
            vnic['isConnected'] = True

    def _update_router_bindings(self, context, edge_id):
        """Mark every router binding of the recovered edge as ACTIVE."""
        edge_router_binds = nsxv_db.get_nsxv_router_bindings_by_edge(
            context.session, edge_id)

        for b in edge_router_binds:
            nsxv_db.update_nsxv_router_binding(
                context.session, b['router_id'], status='ACTIVE')
from oslo_config import cfg
import stevedore

from oslo_log import log

from vmware_nsx._i18n import _
from vmware_nsx.common import exceptions as nsx_exc

LOG = log.getLogger(__name__)

ROUTER_TYPE_DRIVERS = ["distributed", "exclusive", "shared"]


class RouterTypeManager(stevedore.named.NamedExtensionManager):
    """Manage router segment types using drivers."""

    def __init__(self, plugin):
        # Maps a router type name to the stevedore extension serving it.
        self.drivers = {}

        LOG.info("Configured router type driver names: %s",
                 ROUTER_TYPE_DRIVERS)
        super(RouterTypeManager, self).__init__(
            'vmware_nsx.neutron.nsxv.router_type_drivers',
            ROUTER_TYPE_DRIVERS,
            invoke_on_load=True,
            invoke_args=(plugin,))
        LOG.info("Loaded type driver names: %s", self.names())
        self._register_types()
        self._check_tenant_router_types(cfg.CONF.nsxv.tenant_router_types)

    def _register_types(self):
        """Index loaded extensions by router type; the first driver wins."""
        for extension in self:
            rtype = extension.obj.get_type()
            if rtype not in self.drivers:
                self.drivers[rtype] = extension
                continue
            # A later driver for an already-claimed type is ignored.
            LOG.error("Type driver '%(new_driver)s' ignored because "
                      "type driver '%(old_driver)s' is already "
                      "registered for type '%(type)s'",
                      {'new_driver': extension.name,
                       'old_driver': self.drivers[rtype].name,
                       'type': rtype})
        LOG.info("Registered types: %s", self.drivers.keys())

    def _check_tenant_router_types(self, types):
        """Record the configured tenant router types.

        Terminates the service (SystemExit) at the first configured type
        that has no registered driver.
        """
        self.tenant_router_types = []
        for requested in types:
            if requested not in self.drivers:
                msg = _("No type driver for tenant router_type: %s. "
                        "Service terminated!") % requested
                LOG.error(msg)
                raise SystemExit(msg)
            self.tenant_router_types.append(requested)
        LOG.info("Tenant router_types: %s", self.tenant_router_types)

    def get_tenant_router_driver(self, context, router_type):
        """Return the driver object for router_type.

        :raises nsx_exc.NoRouterAvailable: when no driver serves the type.
        """
        extension = self.drivers.get(router_type)
        if not extension:
            raise nsx_exc.NoRouterAvailable()
        return extension.obj

    def decide_tenant_router_type(self, context, router_type=None):
        """Choose the router type for a request.

        With no explicit type, the first configured tenant type backed by
        a driver is used. Admins may request any registered type; other
        tenants only types from the tenant list.

        :raises nsx_exc.NoRouterAvailable: when nothing matches.
        """
        if router_type is None:
            for candidate in self.tenant_router_types:
                if self.drivers.get(candidate):
                    return candidate
            raise nsx_exc.NoRouterAvailable()

        if context.is_admin:
            if self.drivers.get(router_type):
                return router_type
        elif router_type in self.tenant_router_types:
            if self.drivers.get(router_type):
                return router_type
        raise nsx_exc.NoRouterAvailable()
from distutils import version import uuid import netaddr from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import extra_dhcp_opt as ext_edo from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api.definitions import subnet as subnet_def from neutron_lib.api.definitions import vlantransparent as vlan_apidef from neutron_lib.api import validators from neutron_lib.api.validators import availability_zone as az_validator from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context as n_context from neutron_lib.db import constants as db_const from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import allowedaddresspairs as addr_exc from neutron_lib.exceptions import flavors as flav_exc from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.exceptions import multiprovidernet as mpnet_exc from neutron_lib.exceptions import port_security as psec_exc from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import netutils from oslo_utils import uuidutils import six from six import moves from sqlalchemy.orm import exc as sa_exc from neutron.api import extensions as neutron_extensions from neutron.common 
import ipv6_utils from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils as n_utils from neutron.db import _resource_extend as resource_extend from neutron.db import _utils as db_utils from neutron.db import agents_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import api as db_api from neutron.db.availability_zone import router as router_az_db from neutron.db import dns_db from neutron.db import external_net_db from neutron.db import extradhcpopt_db from neutron.db import extraroute_db from neutron.db import l3_attrs_db from neutron.db import l3_db from neutron.db import l3_gwmode_db from neutron.db.models import l3 as l3_db_models from neutron.db.models import securitygroup as securitygroup_model # noqa from neutron.db import models_v2 from neutron.db import portsecurity_db from neutron.db import quota_db # noqa from neutron.db import securitygroups_db from neutron.db import vlantransparent_db from neutron.extensions import providernet from neutron.extensions import securitygroup as ext_sg from neutron.objects import securitygroup from neutron.plugins.common import utils from neutron.quota import resource_registry from neutron.services.flavors import flavors_plugin from vmware_nsx.dvs import dvs from vmware_nsx.services.qos.common import utils as qos_com_utils from vmware_nsx.services.qos.nsx_v import driver as qos_driver from vmware_nsx.services.qos.nsx_v import utils as qos_utils import vmware_nsx from vmware_nsx._i18n import _ from vmware_nsx.common import availability_zones as nsx_com_az from vmware_nsx.common import config # noqa from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import l3_rpc_agent_api from vmware_nsx.common import locking from vmware_nsx.common import managers as nsx_managers from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils as c_utils from vmware_nsx.db import ( extended_security_group_rule as 
extend_sg_rule) from vmware_nsx.db import ( routertype as rt_rtr) from vmware_nsx.db import db as nsx_db from vmware_nsx.db import extended_security_group as extended_secgroup from vmware_nsx.db import maclearning as mac_db from vmware_nsx.db import nsx_portbindings_db as pbin_db from vmware_nsx.db import nsxv_db from vmware_nsx.db import vnic_index_db from vmware_nsx.extensions import ( advancedserviceproviders as as_providers) from vmware_nsx.extensions import ( vnicindex as ext_vnic_idx) from vmware_nsx.extensions import dhcp_mtu as ext_dhcp_mtu from vmware_nsx.extensions import dns_search_domain as ext_dns_search_domain from vmware_nsx.extensions import housekeeper as hk_ext from vmware_nsx.extensions import maclearning as mac_ext from vmware_nsx.extensions import nsxpolicy from vmware_nsx.extensions import projectpluginmap from vmware_nsx.extensions import providersecuritygroup as provider_sg from vmware_nsx.extensions import routersize from vmware_nsx.extensions import secgroup_rule_local_ip_prefix from vmware_nsx.extensions import securitygrouplogging as sg_logging from vmware_nsx.extensions import securitygrouppolicy as sg_policy from vmware_nsx.plugins.common.housekeeper import housekeeper from vmware_nsx.plugins.common import plugin as nsx_plugin_common from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v import managers from vmware_nsx.plugins.nsx_v import md_proxy as nsx_v_md_proxy from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield.common import ( exceptions as vsh_exc) from vmware_nsx.plugins.nsx_v.vshield import edge_firewall_driver from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.plugins.nsx_v.vshield import securitygroup_utils from vmware_nsx.plugins.nsx_v.vshield import vcns_driver from vmware_nsx.services.flowclassifier.nsx_v import utils as fc_utils from 
vmware_nsx.services.fwaas.nsx_v import fwaas_callbacks LOG = logging.getLogger(__name__) PORTGROUP_PREFIX = 'dvportgroup' ROUTER_SIZE = routersize.ROUTER_SIZE VALID_EDGE_SIZES = routersize.VALID_EDGE_SIZES SUBNET_RULE_NAME = 'Subnet Rule' DNAT_RULE_NAME = 'DNAT Rule' ALLOCATION_POOL_RULE_NAME = 'Allocation Pool Rule' NO_SNAT_RULE_NAME = 'No SNAT Rule' @resource_extend.has_resource_extenders class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin, agents_db.AgentDbMixin, nsx_plugin_common.NsxPluginBase, rt_rtr.RouterType_mixin, external_net_db.External_net_db_mixin, extraroute_db.ExtraRoute_db_mixin, extradhcpopt_db.ExtraDhcpOptMixin, router_az_db.RouterAvailabilityZoneMixin, l3_gwmode_db.L3_NAT_db_mixin, pbin_db.NsxPortBindingMixin, portsecurity_db.PortSecurityDbMixin, extend_sg_rule.ExtendedSecurityGroupRuleMixin, securitygroups_db.SecurityGroupDbMixin, extended_secgroup.ExtendedSecurityGroupPropertiesMixin, vnic_index_db.VnicIndexDbMixin, dns_db.DNSDbMixin, nsxpolicy.NsxPolicyPluginBase, vlantransparent_db.Vlantransparent_db_mixin, nsx_com_az.NSXAvailabilityZonesPluginCommon, mac_db.MacLearningDbMixin, hk_ext.Housekeeper): supported_extension_aliases = ["agent", "allowed-address-pairs", "address-scope", "binding", "dns-search-domain", "dvr", "ext-gw-mode", "multi-provider", "port-security", "provider", "quotas", "external-net", "extra_dhcp_opt", "extraroute", "router", "security-group", "secgroup-rule-local-ip-prefix", "security-group-logging", "nsxv-router-type", "nsxv-router-size", "vnic-index", "advanced-service-providers", "subnet_allocation", "availability_zone", "network_availability_zone", "router_availability_zone", "l3-flavors", "flavors", "dhcp-mtu", "mac-learning", "housekeeper"] __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True @resource_registry.tracked_resources( network=models_v2.Network, port=models_v2.Port, subnet=models_v2.Subnet, subnetpool=models_v2.SubnetPool, 
security_group=securitygroup_model.SecurityGroup, security_group_rule=securitygroup_model.SecurityGroupRule, router=l3_db_models.Router, floatingip=l3_db_models.FloatingIP) def __init__(self): self._is_sub_plugin = tvd_utils.is_tvd_core_plugin() self.init_is_complete = False self.housekeeper = None super(NsxVPluginV2, self).__init__() if self._is_sub_plugin: extension_drivers = cfg.CONF.nsx_tvd.nsx_v_extension_drivers else: extension_drivers = cfg.CONF.nsx_extension_drivers self._extension_manager = nsx_managers.ExtensionManager( extension_drivers=extension_drivers) # Bind the dummy L3 notifications self.l3_rpc_notifier = l3_rpc_agent_api.L3NotifyAPI() self._extension_manager.initialize() self.supported_extension_aliases.extend( self._extension_manager.extension_aliases()) self.metadata_proxy_handler = None config.validate_nsxv_config_options() self._network_vlans = utils.parse_network_vlan_ranges( cfg.CONF.nsxv.network_vlan_ranges) neutron_extensions.append_api_extensions_path( [vmware_nsx.NSX_EXT_PATH]) # This needs to be set prior to binding callbacks if cfg.CONF.nsxv.use_dvs_features: self._vcm = dvs.VCManager() else: self._vcm = None # Create the client to interface with the NSX-v _nsx_v_callbacks = edge_utils.NsxVCallbacks(self) self.nsx_v = vcns_driver.VcnsDriver(_nsx_v_callbacks) # Use the existing class instead of creating a new instance self.lbv2_driver = self.nsx_v # Ensure that edges do concurrency self._ensure_lock_operations() self._validate_nsx_version() # Configure aggregate publishing self._aggregate_publishing() # Configure edge reservations self._configure_reservations() self.edge_manager = edge_utils.EdgeManager(self.nsx_v, self) self.nsx_sg_utils = securitygroup_utils.NsxSecurityGroupUtils( self.nsx_v) self.init_availability_zones() self._validate_config() self._use_nsx_policies = False if cfg.CONF.nsxv.use_nsx_policies: if not c_utils.is_nsxv_version_6_2(self.nsx_v.vcns.get_version()): error = (_("NSX policies are not supported for version " 
"%(ver)s.") % {'ver': self.nsx_v.vcns.get_version()}) raise nsx_exc.NsxPluginException(err_msg=error) # Support NSX policies in default security groups self._use_nsx_policies = True # enable the extension self.supported_extension_aliases.append("security-group-policy") self.supported_extension_aliases.append("nsx-policy") # Support transparent VLANS from 6.3.0 onwards. The feature is only # supported if the global configuration flag vlan_transparent is # True if cfg.CONF.vlan_transparent: if c_utils.is_nsxv_version_6_3(self.nsx_v.vcns.get_version()): self.supported_extension_aliases.append("vlan-transparent") else: LOG.warning("Transparent support only from " "NSX 6.3 onwards") self.sg_container_id = self._create_security_group_container() self.default_section = self._create_cluster_default_fw_section() self._process_security_groups_rules_logging() self._router_managers = managers.RouterTypeManager(self) # Make sure starting rpc listeners (for QoS and other agents) # will happen only once self.start_rpc_listeners_called = False # Init the FWaaS support self._init_fwaas() # Service insertion driver register self._si_handler = fc_utils.NsxvServiceInsertionHandler(self) registry.subscribe(self.add_vms_to_service_insertion, fc_utils.SERVICE_INSERTION_RESOURCE, events.AFTER_CREATE) # Subscribe to subnet pools changes registry.subscribe( self.on_subnetpool_address_scope_updated, resources.SUBNETPOOL_ADDRESS_SCOPE, events.AFTER_UPDATE) if c_utils.is_nsxv_version_6_2(self.nsx_v.vcns.get_version()): self.supported_extension_aliases.append("provider-security-group") # Bind QoS notifications qos_driver.register(self) # subscribe the init complete method last, so it will be called only # if init was successful registry.subscribe(self.init_complete, resources.PROCESS, events.AFTER_INIT) @staticmethod def plugin_type(): return projectpluginmap.NsxPlugins.NSX_V @staticmethod def is_tvd_plugin(): return False def init_complete(self, resource, event, trigger, payload=None): with 
locking.LockManager.get_lock('plugin-init-complete'): if self.init_is_complete: # Should be called only once per worker return has_metadata_cfg = ( cfg.CONF.nsxv.nova_metadata_ips and cfg.CONF.nsxv.mgt_net_moid and cfg.CONF.nsxv.mgt_net_proxy_ips and cfg.CONF.nsxv.mgt_net_proxy_netmask) if has_metadata_cfg: # Init md_proxy handler per availability zone self.metadata_proxy_handler = {} for az in self.get_azs_list(): # create metadata handler only if the az supports it. # if not, the global one will be used if az.supports_metadata(): self.metadata_proxy_handler[az.name] = ( nsx_v_md_proxy.NsxVMetadataProxyHandler( self, az)) self.housekeeper = housekeeper.NsxvHousekeeper( hk_ns='vmware_nsx.neutron.nsxv.housekeeper.jobs', hk_jobs=cfg.CONF.nsxv.housekeeping_jobs) self.init_is_complete = True def _validate_nsx_version(self): ver = self.nsx_v.vcns.get_version() if version.LooseVersion(ver) < version.LooseVersion('6.2.3'): error = _("Plugin version doesn't support NSX version %s.") % ver raise nsx_exc.NsxPluginException(err_msg=error) def get_metadata_proxy_handler(self, az_name): if not self.metadata_proxy_handler: return None if az_name in self.metadata_proxy_handler: return self.metadata_proxy_handler[az_name] # fallback to the global handler # Note(asarfaty): in case this is called during init_complete the # default availability zone may still not exist. 
return self.metadata_proxy_handler.get(nsx_az.DEFAULT_NAME) def add_vms_to_service_insertion(self, sg_id): def _add_vms_to_service_insertion(*args, **kwargs): """Adding existing VMs to the service insertion security group Adding all current compute ports with port security to the service insertion security group in order to classify their traffic by the security redirect rules """ sg_id = args[0] context = n_context.get_admin_context() filters = {'device_owner': ['compute:None']} ports = self.get_ports(context, filters=filters) for port in ports: # Only add compute ports with device-id, vnic & port security if (validators.is_attr_set(port.get(ext_vnic_idx.VNIC_INDEX)) and validators.is_attr_set(port.get('device_id')) and port[psec.PORTSECURITY]): try: vnic_idx = port[ext_vnic_idx.VNIC_INDEX] device_id = port['device_id'] vnic_id = self._get_port_vnic_id(vnic_idx, device_id) self._add_member_to_security_group(sg_id, vnic_id) except Exception as e: LOG.info('Could not add port %(port)s to service ' 'insertion security group. 
Exception ' '%(err)s', {'port': port['id'], 'err': e}) # Doing this in a separate thread to not slow down the init process # in case there are many compute ports c_utils.spawn_n(_add_vms_to_service_insertion, sg_id) def start_rpc_listeners(self): if self.start_rpc_listeners_called: # If called more than once - we should not create it again return self.conn.consume_in_threads() LOG.info("NSXV plugin: starting RPC listeners") self.endpoints = [agents_db.AgentExtRpcCallback()] self.topic = topics.PLUGIN self.conn = n_rpc.create_connection() self.conn.create_consumer(self.topic, self.endpoints, fanout=False) self.start_rpc_listeners_called = True return self.conn.consume_in_threads() def _init_fwaas(self): # Bind FWaaS callbacks to the driver self.fwaas_callbacks = fwaas_callbacks.NsxvFwaasCallbacks() def _create_security_group_container(self): name = "OpenStack Security Group container" with locking.LockManager.get_lock('security-group-container-init'): container_id = self.nsx_v.vcns.get_security_group_id(name) if not container_id: description = ("OpenStack Security Group Container, " "managed by Neutron nsx-v plugin.") container = {"securitygroup": {"name": name, "description": description}} h, container_id = ( self.nsx_v.vcns.create_security_group(container)) return container_id def _find_router_driver(self, context, router_id): router_qry = context.session.query(l3_db_models.Router) router_db = router_qry.filter_by(id=router_id).one() return self._get_router_driver(context, router_db) def _get_router_driver(self, context, router_db): router_type_dict = {} self._extend_nsx_router_dict(router_type_dict, router_db) router_type = None if router_type_dict.get("distributed", False): router_type = "distributed" else: router_type = router_type_dict.get("router_type") return self._router_managers.get_tenant_router_driver( context, router_type) def _decide_router_type(self, context, r): router_type = None if (validators.is_attr_set(r.get("distributed")) and 
r.get("distributed")): router_type = "distributed" if validators.is_attr_set(r.get("router_type")): err_msg = _('Can not support router_type extension for ' 'distributed router') raise n_exc.InvalidInput(error_message=err_msg) elif validators.is_attr_set(r.get("router_type")): router_type = r.get("router_type") router_type = self._router_managers.decide_tenant_router_type( context, router_type) if router_type == "distributed": r["distributed"] = True r["router_type"] = "exclusive" else: r["distributed"] = False r["router_type"] = router_type @staticmethod @resource_extend.extends([l3_apidef.ROUTERS]) def _extend_nsx_router_dict(router_res, router_db): router_type_obj = rt_rtr.RouterType_mixin() router_type_obj._extend_nsx_router_dict( router_res, router_db, router_type_obj.nsx_attributes) def _create_cluster_default_fw_section(self): section_name = 'OS Cluster Security Group section' # Default cluster rules rules = [{'name': 'Default DHCP rule for OS Security Groups', 'action': 'allow', 'services': [('17', '67', None, None), ('17', '68', None, None)]}, {'name': 'Default ICMPv6 rule for OS Security Groups', 'action': 'allow', 'services': [('58', None, constants.ICMPV6_TYPE_NS, None), ('58', None, constants.ICMPV6_TYPE_NA, None), ('58', None, constants.ICMPV6_TYPE_RA, None), ('58', None, constants.ICMPV6_TYPE_MLD_QUERY, None)]}, {'name': 'Default DHCPv6 rule for OS Security Groups', 'action': 'allow', 'services': [('17', '546', None, None), ('17', '547', None, None)]}] if cfg.CONF.nsxv.cluster_moid: applied_to_ids = cfg.CONF.nsxv.cluster_moid applied_to_type = 'ClusterComputeResource' else: applied_to_ids = [self.sg_container_id] applied_to_type = 'SecurityGroup' rule_list = [] for rule in rules: rule_config = self.nsx_sg_utils.get_rule_config( applied_to_ids, rule['name'], rule['action'], applied_to_type, services=rule['services'], logged=cfg.CONF.nsxv.log_security_groups_allowed_traffic) rule_list.append(rule_config) igmp_names = ['IGMP Membership Query', 'IGMP V2 
Membership Report', 'IGMP V3 Membership Report', 'IGMP Leave Group'] igmp_ids = [] for name in igmp_names: igmp_id = self._get_appservice_id(name) if igmp_id: igmp_ids.append(igmp_id) if igmp_ids: rules = [{'name': 'Default IGMP rule for OS Security Groups', 'action': 'allow', 'service_ids': igmp_ids}] for rule in rules: rule_config = self.nsx_sg_utils.get_rule_config( applied_to_ids, rule['name'], rule['action'], applied_to_type, application_services=rule['service_ids'], logged=cfg.CONF.nsxv.log_security_groups_allowed_traffic) rule_list.append(rule_config) # Default security-group rules block_rule = self.nsx_sg_utils.get_rule_config( [self.sg_container_id], 'Block All', 'deny', logged=cfg.CONF.nsxv.log_security_groups_blocked_traffic) rule_list.append(block_rule) with locking.LockManager.get_lock('default-section-init'): section_id = self.nsx_v.vcns.get_section_id(section_name) section = ( self.nsx_sg_utils.get_section_with_rules( section_name, rule_list, section_id)) section_req_body = self.nsx_sg_utils.to_xml_string(section) if section_id: self.nsx_v.vcns.update_section_by_id( section_id, 'ip', section_req_body) else: # cluster section does not exists. Create it above the # default l3 section l3_id = self.nsx_v.vcns.get_default_l3_id() h, c = self.nsx_v.vcns.create_section('ip', section_req_body, insert_before=l3_id) section_id = self.nsx_sg_utils.parse_and_get_section_id(c) return section_id def _process_security_groups_rules_logging(self): def process_security_groups_rules_logging(*args, **kwargs): with locking.LockManager.get_lock('nsx-dfw-section', lock_file_prefix='dfw-section'): context = n_context.get_admin_context() log_allowed = cfg.CONF.nsxv.log_security_groups_allowed_traffic # If the section/sg is already logged, then no action is # required. 
for sg in [sg for sg in self.get_security_groups(context) if sg.get(sg_logging.LOGGING) is False]: if sg.get(sg_policy.POLICY): # Logging is not relevant with a policy continue section_uri = self._get_section_uri(context.session, sg['id']) if section_uri is None: continue # Section/sg is not logged, update rules logging according # to the 'log_security_groups_allowed_traffic' config # option. try: h, c = self.nsx_v.vcns.get_section(section_uri) section = self.nsx_sg_utils.parse_section(c) section_needs_update = ( self.nsx_sg_utils.set_rules_logged_option( section, log_allowed)) if section_needs_update: self.nsx_v.vcns.update_section( section_uri, self.nsx_sg_utils.to_xml_string(section), h) except Exception as exc: LOG.error('Unable to update security group %(sg)s ' 'section for logging. %(e)s', {'e': exc, 'sg': sg['id']}) c_utils.spawn_n(process_security_groups_rules_logging) def _create_dhcp_static_binding(self, context, neutron_port_db): network_id = neutron_port_db['network_id'] device_owner = neutron_port_db['device_owner'] if device_owner.startswith("compute"): s_bindings = self.edge_manager.create_static_binding( context, neutron_port_db) self.edge_manager.create_dhcp_bindings( context, neutron_port_db['id'], network_id, s_bindings) def _delete_dhcp_static_binding(self, context, neutron_port_db): network_id = neutron_port_db['network_id'] try: self.edge_manager.delete_dhcp_binding( context, neutron_port_db['id'], network_id, neutron_port_db['mac_address']) except Exception as e: LOG.error('Unable to delete static bindings for %(id)s. 
' 'Error: %(e)s', {'id': neutron_port_db['id'], 'e': e}) def _validate_network_qos(self, network, backend_network): err_msg = None if validators.is_attr_set(network.get(qos_consts.QOS_POLICY_ID)): if not backend_network: err_msg = (_("Cannot configure QOS on external networks")) if not cfg.CONF.nsxv.use_dvs_features: err_msg = (_("Cannot configure QOS " "without enabling use_dvs_features")) if err_msg: raise n_exc.InvalidInput(error_message=err_msg) def _get_network_az_from_net_data(self, net_data): if az_def.AZ_HINTS in net_data and net_data[az_def.AZ_HINTS]: return self._availability_zones_data.get_availability_zone( net_data[az_def.AZ_HINTS][0]) return self.get_default_az() def _get_network_az_dvs_id(self, net_data): az = self._get_network_az_from_net_data(net_data) return az.dvs_id def _get_network_vdn_scope_id(self, net_data): az = self._get_network_az_from_net_data(net_data) return az.vdn_scope_id def _validate_provider_create(self, context, network): if not validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)): return az_dvs = self._get_network_az_dvs_id(network) for segment in network[mpnet_apidef.SEGMENTS]: network_type = segment.get(pnet.NETWORK_TYPE) physical_network = segment.get(pnet.PHYSICAL_NETWORK) segmentation_id = segment.get(pnet.SEGMENTATION_ID) network_type_set = validators.is_attr_set(network_type) segmentation_id_set = validators.is_attr_set(segmentation_id) physical_network_set = validators.is_attr_set(physical_network) err_msg = None if not network_type_set: err_msg = _("%s required") % pnet.NETWORK_TYPE elif network_type == c_utils.NsxVNetworkTypes.FLAT: if segmentation_id_set: err_msg = _("Segmentation ID cannot be specified with " "flat network type") elif network_type == c_utils.NsxVNetworkTypes.VLAN: if not segmentation_id_set: if physical_network_set: if physical_network not in self._network_vlans: err_msg = _("Invalid physical network for " "segmentation ID allocation") else: err_msg = _("Segmentation ID must be specified with " 
"vlan network type") elif (segmentation_id_set and not utils.is_valid_vlan_tag(segmentation_id)): err_msg = (_("%(segmentation_id)s out of range " "(%(min_id)s through %(max_id)s)") % {'segmentation_id': segmentation_id, 'min_id': constants.MIN_VLAN_TAG, 'max_id': constants.MAX_VLAN_TAG}) else: # Verify segment is not already allocated bindings = nsxv_db.get_network_bindings_by_vlanid( context.session, segmentation_id) if bindings: dvs_ids = self._get_dvs_ids(physical_network, az_dvs) for phy_uuid in dvs_ids: for binding in bindings: if binding['phy_uuid'] == phy_uuid: raise n_exc.VlanIdInUse( vlan_id=segmentation_id, physical_network=phy_uuid) elif network_type == c_utils.NsxVNetworkTypes.VXLAN: # Currently unable to set the segmentation id if segmentation_id_set: err_msg = _("Segmentation ID cannot be set with VXLAN") elif network_type == c_utils.NsxVNetworkTypes.PORTGROUP: external = network.get(extnet_apidef.EXTERNAL) if segmentation_id_set: err_msg = _("Segmentation ID cannot be set with portgroup") if not physical_network_set: err_msg = _("Physical network must be set!") elif not self.nsx_v.vcns.validate_network(physical_network): err_msg = _("Physical network doesn't exist") # A provider network portgroup will need the network name to # match the portgroup name elif ((not validators.is_attr_set(external) or validators.is_attr_set(external) and not external) and not self.nsx_v.vcns.validate_network_name( physical_network, network['name'])): err_msg = _("Portgroup name must match network name") # make sure no other neutron network is using it bindings = ( nsxv_db.get_network_bindings_by_physical_net_and_type( context.elevated().session, physical_network, network_type)) if bindings: err_msg = (_('protgroup %s is already used by ' 'another network') % physical_network) else: err_msg = (_("%(net_type_param)s %(net_type_value)s not " "supported") % {'net_type_param': pnet.NETWORK_TYPE, 'net_type_value': network_type}) if err_msg: raise 
n_exc.InvalidInput(error_message=err_msg) # TODO(salvatore-orlando): Validate tranport zone uuid # which should be specified in physical_network def _validate_network_type(self, context, network_id, net_types): bindings = nsxv_db.get_network_bindings(context.session, network_id) multiprovider = nsx_db.is_multiprovider_network(context.session, network_id) if bindings: if not multiprovider: return bindings[0].binding_type in net_types else: for binding in bindings: if binding.binding_type not in net_types: return False return True return False def _extend_network_dict_provider(self, context, network, multiprovider=None, bindings=None): if 'id' not in network: return if not bindings: bindings = nsxv_db.get_network_bindings(context.session, network['id']) if not multiprovider: multiprovider = nsx_db.is_multiprovider_network(context.session, network['id']) # With NSX plugin 'normal' overlay networks will have no binding # TODO(salvatore-orlando) make sure users can specify a distinct # phy_uuid as 'provider network' for STT net type if bindings: if not multiprovider: # network came in through provider networks api network[pnet.NETWORK_TYPE] = bindings[0].binding_type network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id else: # network come in though multiprovider networks api network[mpnet_apidef.SEGMENTS] = [ {pnet.NETWORK_TYPE: binding.binding_type, pnet.PHYSICAL_NETWORK: binding.phy_uuid, pnet.SEGMENTATION_ID: binding.vlan_id} for binding in bindings] # update availability zones network[az_def.COLLECTION_NAME] = ( self._get_network_availability_zones(context, network)) def _get_subnet_as_providers(self, context, subnet, nw_dict=None): net_id = subnet.get('network_id') if net_id is None: net_id = self.get_subnet(context, subnet['id']).get('network_id') if nw_dict: providers = nw_dict.get(net_id, []) else: as_provider_data = nsxv_db.get_edge_vnic_bindings_by_int_lswitch( context.session, net_id) providers = 
[asp['edge_id'] for asp in as_provider_data] return providers def get_subnet(self, context, id, fields=None): subnet = super(NsxVPluginV2, self).get_subnet(context, id, fields) if not context.is_admin: return subnet elif fields and as_providers.ADV_SERVICE_PROVIDERS in fields: subnet[as_providers.ADV_SERVICE_PROVIDERS] = ( self._get_subnet_as_providers(context, subnet)) return subnet def get_subnets(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): subnets = super(NsxVPluginV2, self).get_subnets(context, filters, fields, sorts, limit, marker, page_reverse) if not context.is_admin or (not filters and not fields): return subnets new_subnets = [] if ((fields and as_providers.ADV_SERVICE_PROVIDERS in fields) or (filters and filters.get(as_providers.ADV_SERVICE_PROVIDERS))): # This ugly mess should reduce DB calls with network_id field # as filter - as network_id is not indexed vnic_binds = nsxv_db.get_edge_vnic_bindings_with_networks( context.session) nw_dict = {} for vnic_bind in vnic_binds: if nw_dict.get(vnic_bind['network_id']): nw_dict[vnic_bind['network_id']].append( vnic_bind['edge_id']) else: nw_dict[vnic_bind['network_id']] = [vnic_bind['edge_id']] # We only deal metadata provider field when: # - adv_service_provider is explicitly retrieved # - adv_service_provider is used in a filter for subnet in subnets: as_provider = self._get_subnet_as_providers( context, subnet, nw_dict) md_filter = ( None if filters is None else filters.get(as_providers.ADV_SERVICE_PROVIDERS)) if md_filter is None or len(set(as_provider) & set(md_filter)): # Include metadata_providers only if requested in results if fields and as_providers.ADV_SERVICE_PROVIDERS in fields: subnet[as_providers.ADV_SERVICE_PROVIDERS] = ( as_provider) new_subnets.append(subnet) else: # No need to handle metadata providers field return subnets return new_subnets def _convert_to_transport_zones_dict(self, network): """Converts the provider request body to 
multiprovider. Returns: True if request is multiprovider False if provider and None if neither. """ if any(validators.is_attr_set(network.get(f)) for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)): if validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)): raise mpnet_exc.SegmentsSetInConjunctionWithProviders() # convert to transport zone list network[mpnet_apidef.SEGMENTS] = [ {pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE], pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK], pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}] del network[pnet.NETWORK_TYPE] del network[pnet.PHYSICAL_NETWORK] del network[pnet.SEGMENTATION_ID] return False if validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)): return True def _delete_backend_network(self, moref, dvs_id=None): """Deletes the backend NSX network. This can either be a VXLAN or a VLAN network. The type is determined by the prefix of the moref. The dvs_id is relevant only if it is a vlan network """ if moref.startswith(PORTGROUP_PREFIX): self.nsx_v.delete_port_group(dvs_id, moref) else: self.nsx_v.delete_virtual_wire(moref) def _get_vlan_network_name(self, net_data, dvs_id): if net_data.get('name') is None: net_data['name'] = '' # Maximum name length is 80 characters. 'id' length is 36 # maximum prefix for name plus dvs-id is 43 if net_data['name'] == '': prefix = dvs_id[:43] else: prefix = ('%s-%s' % (dvs_id, net_data['name']))[:43] return '%s-%s' % (prefix, net_data['id']) def _update_network_teaming(self, dvs_id, net_id, net_moref): if self._vcm: try: h, switch = self.nsx_v.vcns.get_vdn_switch(dvs_id) except Exception as e: LOG.warning('DVS %s not registered on NSX. Unable to ' 'update teaming for network %s', dvs_id, net_id) return try: self._vcm.update_port_groups_config( dvs_id, net_id, net_moref, self._vcm.update_port_group_spec_teaming, switch) except Exception as e: LOG.error('Unable to update teaming information for ' 'net %(net_id)s. 
Error: %(e)s', {'net_id': net_id, 'e': e}) def _create_vlan_network_at_backend(self, net_data, dvs_id): network_name = self._get_vlan_network_name(net_data, dvs_id) segment = net_data[mpnet_apidef.SEGMENTS][0] vlan_tag = 0 if (segment.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.VLAN): vlan_tag = segment.get(pnet.SEGMENTATION_ID, 0) portgroup = {'vlanId': vlan_tag, 'networkBindingType': 'Static', 'networkName': network_name, 'networkType': 'Isolation'} config_spec = {'networkSpec': portgroup} try: h, c = self.nsx_v.vcns.create_port_group(dvs_id, config_spec) except Exception as e: error = (_("Failed to create port group on DVS: %(dvs_id)s. " "Reason: %(reason)s") % {'dvs_id': dvs_id, 'reason': e.response}) raise nsx_exc.NsxPluginException(err_msg=error) self._update_network_teaming(dvs_id, net_data['id'], c) return c def _get_dvs_ids(self, physical_network, default_dvs): """Extract DVS-IDs provided in the physical network field. If physical network attribute is not set, return the pre configured dvs-id from nsx.ini file, otherwise convert physical network string to a list of unique DVS-IDs. 
""" if not validators.is_attr_set(physical_network): return [default_dvs] # Return unique DVS-IDs only and ignore duplicates return list(set( dvs.strip() for dvs in physical_network.split(',') if dvs)) def _add_member_to_security_group(self, sg_id, vnic_id): with locking.LockManager.get_lock('neutron-security-ops' + str(sg_id)): try: self.nsx_v.vcns.add_member_to_security_group( sg_id, vnic_id) LOG.info("Added %(sg_id)s member to NSX security " "group %(vnic_id)s", {'sg_id': sg_id, 'vnic_id': vnic_id}) except Exception: with excutils.save_and_reraise_exception(): LOG.error("NSX security group %(sg_id)s member add " "failed %(vnic_id)s.", {'sg_id': sg_id, 'vnic_id': vnic_id}) def _add_security_groups_port_mapping(self, session, vnic_id, added_sgids): if vnic_id is None or added_sgids is None: return for add_sg in added_sgids: nsx_sg_id = nsx_db.get_nsx_security_group_id(session, add_sg, moref=True) if nsx_sg_id is None: LOG.warning("NSX security group not found for %s", add_sg) else: self._add_member_to_security_group(nsx_sg_id, vnic_id) def _remove_member_from_security_group(self, sg_id, vnic_id): with locking.LockManager.get_lock('neutron-security-ops' + str(sg_id)): try: h, c = self.nsx_v.vcns.remove_member_from_security_group( sg_id, vnic_id) except Exception: LOG.debug("NSX security group %(nsx_sg_id)s member " "delete failed %(vnic_id)s", {'nsx_sg_id': sg_id, 'vnic_id': vnic_id}) def _delete_security_groups_port_mapping(self, session, vnic_id, deleted_sgids): if vnic_id is None or deleted_sgids is None: return # Remove vnic from delete security groups binding for del_sg in deleted_sgids: nsx_sg_id = nsx_db.get_nsx_security_group_id(session, del_sg, moref=True) if nsx_sg_id is None: LOG.warning("NSX security group not found for %s", del_sg) else: self._remove_member_from_security_group(nsx_sg_id, vnic_id) def _update_security_groups_port_mapping(self, session, port_id, vnic_id, current_sgids, new_sgids): new_sgids = new_sgids or [] current_sgids = current_sgids 
or [] # If no vnic binding is found, nothing can be done, so return if vnic_id is None: return deleted_sgids = set() added_sgids = set() # Find all delete security group from port binding for curr_sg in current_sgids: if curr_sg not in new_sgids: deleted_sgids.add(curr_sg) # Find all added security group from port binding for new_sg in new_sgids: if new_sg not in current_sgids: added_sgids.add(new_sg) self._delete_security_groups_port_mapping(session, vnic_id, deleted_sgids) self._add_security_groups_port_mapping(session, vnic_id, added_sgids) def _get_port_vnic_id(self, port_index, device_id): # The vnic-id format which is expected by NSXv return '%s.%03d' % (device_id, port_index) def init_availability_zones(self): self._availability_zones_data = nsx_az.NsxVAvailabilityZones( use_tvd_config=self._is_sub_plugin) def _list_availability_zones(self, context, filters=None): #TODO(asarfaty): We may need to use the filters arg, but now it # is here only for overriding the original api result = {} for az in self.get_azs_names(): # Add this availability zone as a router & network resource for resource in ('router', 'network'): result[(az, resource)] = True return result def _validate_availability_zones_in_obj(self, context, resource_type, obj_data): if az_def.AZ_HINTS in obj_data: self.validate_availability_zones(context, resource_type, obj_data[az_def.AZ_HINTS], force=True) def validate_availability_zones(self, context, resource_type, availability_zones, force=False): """Verify that the availability zones exist, and only 1 hint was set. """ # This method is called directly from this plugin but also from # registered callbacks if self._is_sub_plugin and not force: # validation should be done together for both plugins return return self.validate_obj_azs(availability_zones) def _prepare_spoofguard_policy(self, network_type, net_data, net_morefs): # The method will determine if a portgroup is already assigned to a # spoofguard policy. 
If so, it will return the predefined policy. If # not a new spoofguard policy will be created if network_type == c_utils.NsxVNetworkTypes.PORTGROUP: pcs = self.nsx_v.vcns.get_spoofguard_policies()[1].get('policies', []) for policy in pcs: for ep in policy['enforcementPoints']: if ep['id'] == net_morefs[0]: return policy['policyId'], True LOG.warning("No spoofguard policy will be created for %s", net_data['id']) return None, False # Always use enabled spoofguard policy. ports with disabled port # security will be added to the exclude list sg_policy_id = self.nsx_v.vcns.create_spoofguard_policy( net_morefs, net_data['id'], True)[1] return sg_policy_id, False def _get_physical_network(self, network_type, net_data): if network_type == c_utils.NsxVNetworkTypes.VXLAN: return self._get_network_vdn_scope_id(net_data) else: # Use the dvs_id of the availability zone return self._get_network_az_dvs_id(net_data) def _generate_segment_id(self, context, physical_network, net_data): bindings = nsxv_db.get_network_bindings_by_physical_net( context.session, physical_network) vlan_ranges = self._network_vlans.get(physical_network, []) if vlan_ranges: vlan_ids = set() for vlan_min, vlan_max in vlan_ranges: vlan_ids |= set(moves.range(vlan_min, vlan_max + 1)) else: vlan_min = constants.MIN_VLAN_TAG vlan_max = constants.MAX_VLAN_TAG vlan_ids = set(moves.range(vlan_min, vlan_max + 1)) used_ids_in_range = set([binding.vlan_id for binding in bindings if binding.vlan_id in vlan_ids]) free_ids = list(vlan_ids ^ used_ids_in_range) if len(free_ids) == 0: raise n_exc.NoNetworkAvailable() net_data[mpnet_apidef.SEGMENTS][0][pnet.SEGMENTATION_ID] = free_ids[0] def create_network(self, context, network): net_data = network['network'] tenant_id = net_data['tenant_id'] self._ensure_default_security_group(context, tenant_id) # Process the provider network extension provider_type = self._convert_to_transport_zones_dict(net_data) self._validate_provider_create(context, net_data) 
        # (continuation of create_network(), whose head is outside this chunk)
        self._validate_availability_zones_in_obj(context, 'network', net_data)
        net_data['id'] = str(uuid.uuid4())
        external = net_data.get(extnet_apidef.EXTERNAL)
        # A network needs a backend (NSX) representation unless it was
        # explicitly created as an external network.
        backend_network = (not validators.is_attr_set(external) or
                           validators.is_attr_set(external) and not external)
        network_type = None
        # NOTE(review): 'segmenation' typo is pre-existing; kept as-is.
        generate_segmenation_id = False
        lock_vlan_creation = False
        if provider_type is not None:
            segment = net_data[mpnet_apidef.SEGMENTS][0]
            network_type = segment.get(pnet.NETWORK_TYPE)
            if network_type == c_utils.NsxVNetworkTypes.VLAN:
                physical_network = segment.get(pnet.PHYSICAL_NETWORK)
                # Only physical networks listed in the configured
                # network_vlans need serialized vlan-id allocation.
                if physical_network in self._network_vlans:
                    lock_vlan_creation = True
                    if not validators.is_attr_set(
                            segment.get(pnet.SEGMENTATION_ID)):
                        generate_segmenation_id = True
        if lock_vlan_creation:
            # Serialize creations on the same physical network so two
            # concurrent requests cannot allocate the same vlan id.
            with locking.LockManager.get_lock(
                    'vlan-networking-%s' % physical_network):
                if generate_segmenation_id:
                    self._generate_segment_id(context, physical_network,
                                              net_data)
                else:
                    segmentation_id = segment.get(pnet.SEGMENTATION_ID)
                    if nsxv_db.get_network_bindings_by_ids(
                            context.session, segmentation_id,
                            physical_network):
                        raise n_exc.VlanIdInUse(
                            vlan_id=segmentation_id,
                            physical_network=physical_network)

                return self._create_network(context, network, net_data,
                                            provider_type, external,
                                            backend_network, network_type)
        else:
            return self._create_network(context, network, net_data,
                                        provider_type, external,
                                        backend_network, network_type)

    def _create_network(self, context, network, net_data,
                        provider_type, external, backend_network,
                        network_type):
        """Create the network on the NSX-V backend and in the neutron DB.

        Creates the backend object (virtual wire for VXLAN, DVS port
        group(s) for VLAN/FLAT, or reuses a predefined port group),
        optionally a SpoofGuard policy, then persists the neutron network,
        provider bindings and moref mappings in one DB transaction.
        On any failure the backend objects created so far are rolled back
        and the exception is re-raised.
        """
        # An external network should be created in the case that we have a
        # flat, vlan or vxlan network. For port groups we do not make any
        # changes.
        external_backend_network = (
            external and provider_type is not None and
            network_type != c_utils.NsxVNetworkTypes.PORTGROUP)

        self._validate_network_qos(net_data, backend_network)

        # Update the transparent vlan if configured
        vlt = False
        if n_utils.is_extension_supported(self, 'vlan-transparent'):
            vlt = vlan_apidef.get_vlan_transparent(net_data)

        if backend_network or external_backend_network:
            #NOTE(abhiraut): Consider refactoring code below to have more
            #                readable conditions.
            if (provider_type is None or
                network_type == c_utils.NsxVNetworkTypes.VXLAN):
                virtual_wire = {"name": net_data['id'],
                                "tenantId": "virtual wire tenant"}
                if vlt:
                    virtual_wire["guestVlanAllowed"] = True
                config_spec = {"virtualWireCreateSpec": virtual_wire}
                vdn_scope_id = self._get_network_vdn_scope_id(net_data)
                if provider_type is not None:
                    segment = net_data[mpnet_apidef.SEGMENTS][0]
                    if validators.is_attr_set(
                            segment.get(pnet.PHYSICAL_NETWORK)):
                        # A provider VXLAN network may override the default
                        # vdn scope; validate it exists on the backend.
                        vdn_scope_id = segment.get(pnet.PHYSICAL_NETWORK)
                        if not (self.nsx_v.vcns.
                                validate_vdn_scope(vdn_scope_id)):
                            raise nsx_exc.NsxResourceNotFound(
                                res_name='vdn_scope_id',
                                res_id=vdn_scope_id)
                h, c = self.nsx_v.vcns.create_virtual_wire(vdn_scope_id,
                                                           config_spec)
                net_morefs = [c]
                dvs_net_ids = [net_data['id']]
            elif network_type == c_utils.NsxVNetworkTypes.PORTGROUP:
                if vlt:
                    raise NotImplementedError(_("Transparent support only "
                                                "for VXLANs"))
                # Predefined port group: the moref is supplied by the user
                # in the physical_network attribute; nothing is created.
                segment = net_data[mpnet_apidef.SEGMENTS][0]
                net_morefs = [segment.get(pnet.PHYSICAL_NETWORK)]
                dvs_net_ids = [net_data['name']]
            else:
                # VLAN or FLAT: create a port group on each relevant DVS.
                segment = net_data[mpnet_apidef.SEGMENTS][0]
                physical_network = segment.get(pnet.PHYSICAL_NETWORK)
                # Retrieve the list of dvs-ids from physical network.
                # If physical_network attr is not set, retrieve a list
                # consisting of a single dvs-id pre-configured in nsx.ini
                az_dvs = self._get_network_az_dvs_id(net_data)
                dvs_ids = self._get_dvs_ids(physical_network, az_dvs)
                dvs_net_ids = []
                # Save the list of netmorefs from the backend
                net_morefs = []
                dvs_pg_mappings = {}
                for dvs_id in dvs_ids:
                    try:
                        net_moref = self._create_vlan_network_at_backend(
                            dvs_id=dvs_id,
                            net_data=net_data)
                    except nsx_exc.NsxPluginException:
                        with excutils.save_and_reraise_exception():
                            # Delete VLAN networks on other DVSes if it
                            # fails to be created on one DVS and reraise
                            # the original exception.
                            for dvsmoref, netmoref in six.iteritems(
                                    dvs_pg_mappings):
                                self._delete_backend_network(
                                    netmoref, dvsmoref)
                    dvs_pg_mappings[dvs_id] = net_moref
                    net_morefs.append(net_moref)
                    dvs_net_ids.append(self._get_vlan_network_name(
                        net_data, dvs_id))
                    if vlt:
                        try:
                            # Enable guest (transparent) vlan trunking on
                            # the newly-created port group.
                            self._vcm.update_port_groups_config(
                                dvs_id, net_data['id'], net_moref,
                                self._vcm.update_port_group_spec_trunk, {})
                        except Exception:
                            with excutils.save_and_reraise_exception():
                                # Delete VLAN networks on other DVSes if it
                                # fails to be created on one DVS and reraise
                                # the original exception.
                                for dvsmoref, netmoref in six.iteritems(
                                        dvs_pg_mappings):
                                    self._delete_backend_network(
                                        netmoref, dvsmoref)
        try:
            net_data[psec.PORTSECURITY] = net_data.get(psec.PORTSECURITY,
                                                       True)
            if not cfg.CONF.nsxv.spoofguard_enabled:
                LOG.info("Network %s will have port security disabled",
                         net_data['id'])
                net_data[psec.PORTSECURITY] = False

            # Create SpoofGuard policy for network anti-spoofing
            sg_policy_id = None
            if cfg.CONF.nsxv.spoofguard_enabled and backend_network:
                # This variable is set as the method below may result in a
                # exception and we may need to rollback
                predefined = False
                sg_policy_id, predefined = self._prepare_spoofguard_policy(
                    network_type, net_data, net_morefs)

            with db_api.context_manager.writer.using(context):
                new_net = super(NsxVPluginV2, self).create_network(context,
                                                                   network)
                self._extension_manager.process_create_network(
                    context, net_data, new_net)
                # Process port security extension
                self._process_network_port_security_create(
                    context, net_data, new_net)
                if vlt:
                    super(NsxVPluginV2, self).update_network(
                        context, new_net['id'],
                        {'network': {'vlan_transparent': vlt}})

                # update the network with the availability zone hints
                if az_def.AZ_HINTS in net_data:
                    az_hints = az_validator.convert_az_list_to_string(
                        net_data[az_def.AZ_HINTS])
                    super(NsxVPluginV2, self).update_network(
                        context, new_net['id'],
                        {'network': {az_def.AZ_HINTS: az_hints}})
                    new_net[az_def.AZ_HINTS] = az_hints
                    # still no availability zones until subnets creation
                    new_net[az_def.COLLECTION_NAME] = []

                # DB Operations for setting the network as external
                self._process_l3_create(context, new_net, net_data)
                if (net_data.get(mpnet_apidef.SEGMENTS) and
                    isinstance(provider_type, bool)):
                    net_bindings = []
                    for tz in net_data[mpnet_apidef.SEGMENTS]:
                        network_type = tz.get(pnet.NETWORK_TYPE)
                        segmentation_id = tz.get(pnet.SEGMENTATION_ID, 0)
                        segmentation_id_set = validators.is_attr_set(
                            segmentation_id)
                        if not segmentation_id_set:
                            segmentation_id = 0
                        physical_network = tz.get(pnet.PHYSICAL_NETWORK, '')
                        physical_net_set = validators.is_attr_set(
                            physical_network)
                        if not physical_net_set:
                            if external_backend_network:
                                physical_network = net_morefs[0]
                            else:
                                physical_network = (
                                    self._get_physical_network(
                                        network_type, net_data))
                        net_bindings.append(nsxv_db.add_network_binding(
                            context.session, new_net['id'],
                            network_type,
                            physical_network,
                            segmentation_id))
                    if provider_type:
                        nsx_db.set_multiprovider_network(context.session,
                                                         new_net['id'])
                    self._extend_network_dict_provider(context, new_net,
                                                       provider_type,
                                                       net_bindings)
                if backend_network or external_backend_network:
                    # Save moref in the DB for future access
                    if (network_type == c_utils.NsxVNetworkTypes.VLAN or
                        network_type == c_utils.NsxVNetworkTypes.FLAT):
                        # Save netmoref to dvs id mappings for VLAN network
                        # type for future access.
                        for dvs_id, netmoref in six.iteritems(
                                dvs_pg_mappings):
                            nsx_db.add_neutron_nsx_network_mapping(
                                session=context.session,
                                neutron_id=new_net['id'],
                                nsx_switch_id=netmoref,
                                dvs_id=dvs_id)
                    else:
                        for net_moref in net_morefs:
                            nsx_db.add_neutron_nsx_network_mapping(
                                context.session, new_net['id'],
                                net_moref)
                if (cfg.CONF.nsxv.spoofguard_enabled and backend_network and
                    sg_policy_id):
                    nsxv_db.map_spoofguard_policy_for_network(
                        context.session, new_net['id'], sg_policy_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Delete the backend network
                if backend_network or external_backend_network:
                    if (cfg.CONF.nsxv.spoofguard_enabled and sg_policy_id
                        and not predefined):
                        self.nsx_v.vcns.delete_spoofguard_policy(
                            sg_policy_id)
                    # Ensure that an predefined portgroup will not be deleted
                    if network_type == c_utils.NsxVNetworkTypes.VXLAN:
                        for net_moref in net_morefs:
                            self._delete_backend_network(net_moref)
                    elif (network_type and network_type !=
                          c_utils.NsxVNetworkTypes.PORTGROUP):
                        for dvsmrf, netmrf in six.iteritems(dvs_pg_mappings):
                            self._delete_backend_network(netmrf, dvsmrf)
                LOG.exception('Failed to create network')

        # If init is incomplete calling _update_qos_network() will result a
        # deadlock.
        # That situation happens when metadata init is creating a network
        # on its 1st execution.
        # Therefore we skip this code during init.
        if backend_network and self.init_is_complete:
            # Update the QOS restrictions of the backend network
            self._update_qos_on_created_network(context, net_data, new_net)

        # this extra lookup is necessary to get the
        # latest db model for the extension functions
        net_model = self._get_network(context, new_net['id'])
        resource_extend.apply_funcs('networks', new_net, net_model)
        return new_net

    def _update_qos_on_created_network(self, context, net_data, new_net):
        """Attach the requested QoS policy to a newly created network.

        If a policy was attached, propagate its rules to the backend
        port group(s) as well.
        """
        qos_policy_id = qos_com_utils.set_qos_policy_on_new_net(
            context, net_data, new_net)
        if qos_policy_id:
            # update the QoS data on the backend
            self._update_qos_on_backend_network(
                context, net_data['id'], qos_policy_id)

    def _update_qos_on_backend_network(self, context, net_id,
                                       qos_policy_id):
        """Apply a neutron QoS policy to all backend port groups of net_id."""
        # Translate the QoS rule data into Nsx values
        qos_data = qos_utils.NsxVQosRule(
            context=context, qos_policy_id=qos_policy_id)

        # default dvs for this network
        az = self.get_network_az_by_net_id(context, net_id)
        az_dvs_id = az.dvs_id

        # get the network moref/s from the db
        net_mappings = nsx_db.get_nsx_network_mappings(
            context.session, net_id)
        for mapping in net_mappings:
            # update the qos restrictions of the network; fall back to the
            # availability-zone DVS when the mapping has no dvs_id.
            self._vcm.update_port_groups_config(
                mapping.dvs_id or az_dvs_id,
                net_id, mapping.nsx_id,
                self._vcm.update_port_group_spec_qos, qos_data)

    def _cleanup_dhcp_edge_before_deletion(self, context, net_id):
        """Clean up DHCP-Edge metadata config before net_id is deleted."""
        if self.metadata_proxy_handler:
            # Find if this is the last network which is bound
            # to DHCP Edge. If it is - cleanup Edge metadata config
            dhcp_edge = nsxv_db.get_dhcp_edge_network_binding(
                context.session, net_id)
            if dhcp_edge:
                edge_vnics = nsxv_db.get_edge_vnic_bindings_by_edge(
                    context.session, dhcp_edge['edge_id'])
                # If the DHCP Edge is connected to two networks:
                # the deleted network and the inter-edge network, we can
                # delete the inter-edge interface
                if len(edge_vnics) == 2:
                    rtr_binding = nsxv_db.get_nsxv_router_binding_by_edge(
                        context.session, dhcp_edge['edge_id'])
                    if rtr_binding:
                        rtr_id = rtr_binding['router_id']
                        az_name = rtr_binding['availability_zone']
                        md_proxy = self.get_metadata_proxy_handler(az_name)
                        if md_proxy:
                            md_proxy.cleanup_router_edge(context, rtr_id)
            else:
                # No dedicated DHCP edge binding - reconfigure the metadata
                # port of the shared edge instead.
                self.edge_manager.reconfigure_shared_edge_metadata_port(
                    context, (vcns_const.DHCP_EDGE_PREFIX + net_id)[:36])

    def _is_neutron_spoofguard_policy(self, net_id, moref, policy_id):
        """Return True if the SpoofGuard policy was created by this plugin.

        A neutron policy will have the network UUID as the name of the
        policy, and the network moref among its enforcement points.
        """
        try:
            policy = self.nsx_v.vcns.get_spoofguard_policy(policy_id)[1]
        except Exception:
            LOG.error("Policy does not exists for %s", policy_id)
            # We will not attempt to delete a policy that does not exist
            return False
        if policy:
            for ep in policy['enforcementPoints']:
                if ep['id'] == moref and policy['name'] == net_id:
                    return True
        return False

    def _validate_internal_network(self, context, network_id):
        """Raise InvalidInput if network_id is a plugin-internal network."""
        if nsxv_db.get_nsxv_internal_network_by_id(
                context.elevated().session, network_id):
            msg = (_("Cannot delete internal network %s or its subnets and "
                     "ports") % network_id)
            raise n_exc.InvalidInput(error_message=msg)

    def delete_network(self, context, id):
        """Delete a network from neutron and from the NSX-V backend.

        Order matters here: DHCP edge/ports are cleaned first, then the
        neutron DB rows are removed, and only afterwards the backend
        networks (which would fail while ports still exist on them).
        Predefined port groups attached as external networks are never
        deleted from the backend.
        """
        mappings = nsx_db.get_nsx_network_mappings(context.session, id)
        bindings = nsxv_db.get_network_bindings(context.session, id)
        if cfg.CONF.nsxv.spoofguard_enabled:
            sg_policy_id = nsxv_db.get_spoofguard_policy_id(
                context.session, id)

        self._validate_internal_network(context, id)

        # Update the DHCP edge for metadata and clean the vnic in DHCP edge
        # if there is only no other existing port besides DHCP port
        filters = {'network_id': [id]}
        ports = self.get_ports(context, filters=filters)
        auto_del = [p['id'] for p in ports
                    if p['device_owner'] in [constants.DEVICE_OWNER_DHCP]]
        is_dhcp_backend_deleted = False
        if auto_del:
            filters = {'network_id': [id], 'enable_dhcp': [True]}
            sids = self.get_subnets(context, filters=filters, fields=['id'])
            if len(sids) > 0:
                try:
                    self._cleanup_dhcp_edge_before_deletion(context, id)
                    self.edge_manager.delete_dhcp_edge_service(context, id)
                    is_dhcp_backend_deleted = True
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception('Failed to delete network')
            for port_id in auto_del:
                try:
                    self.delete_port(context.elevated(), port_id,
                                     force_delete_dhcp=True)
                except Exception as e:
                    LOG.warning('Unable to delete port %(port_id)s. '
                                'Reason: %(e)s',
                                {'port_id': port_id, 'e': e})

        with db_api.context_manager.writer.using(context):
            self._process_l3_delete(context, id)
            # We would first delete subnet db if the backend dhcp service is
            # deleted in case of entering delete_subnet logic and retrying
            # to delete backend dhcp service again.
            if is_dhcp_backend_deleted:
                subnets = self._get_subnets_by_network(context, id)
                for subnet in subnets:
                    self.base_delete_subnet(context, subnet['id'])
            super(NsxVPluginV2, self).delete_network(context, id)

        # Do not delete a predefined port group that was attached to
        # an external network
        if (bindings and
            bindings[0].binding_type ==
            c_utils.NsxVNetworkTypes.PORTGROUP):
            if cfg.CONF.nsxv.spoofguard_enabled and sg_policy_id:
                if self._is_neutron_spoofguard_policy(
                        id, mappings[0].nsx_id, sg_policy_id):
                    self.nsx_v.vcns.delete_spoofguard_policy(sg_policy_id)
            return

        # Delete the backend network if necessary. This is done after
        # the base operation as that may throw an exception in the case
        # that there are ports defined on the network.
        if mappings:
            if cfg.CONF.nsxv.spoofguard_enabled and sg_policy_id:
                self.nsx_v.vcns.delete_spoofguard_policy(sg_policy_id)
            edge_utils.check_network_in_use_at_backend(context, id)
            for mapping in mappings:
                self._delete_backend_network(
                    mapping.nsx_id, mapping.dvs_id)

    def _extend_get_network_dict_provider(self, context, net):
        """Add provider-network and QoS policy fields to a network dict."""
        self._extend_network_dict_provider(context, net)
        net[qos_consts.QOS_POLICY_ID] = (
            qos_com_utils.get_network_policy_id(context, net['id']))

    def get_network(self, context, id, fields=None):
        """Return the network dict extended with provider/QoS fields."""
        with db_api.context_manager.reader.using(context):
            # goto to the plugin DB and fetch the network
            network = self._get_network(context, id)
            # Don't do field selection here otherwise we won't be able
            # to add provider networks fields
            net_result = self._make_network_dict(network,
                                                 context=context)
            self._extend_get_network_dict_provider(context, net_result)
        return db_utils.resource_fields(net_result, fields)

    def get_networks(self, context, filters=None, fields=None,
                     sorts=None, limit=None, marker=None,
                     page_reverse=False):
        """List networks, extending each with provider/QoS fields.

        Field selection is applied only after the provider extension so
        the extra fields can be computed from the full dicts.
        """
        filters = filters or {}
        with db_api.context_manager.reader.using(context):
            networks = (
                super(NsxVPluginV2, self).get_networks(
                    context, filters, fields, sorts,
                    limit, marker, page_reverse))
            for net in networks:
                self._extend_get_network_dict_provider(context, net)
        return (networks if not fields else
                [db_utils.resource_fields(network, fields)
                 for network in networks])

    def _raise_if_updates_provider_attributes(self, original_network, attrs,
                                              az_dvs):
        """Raise exception if provider attributes are present.

        For the NSX-V we want to allow changing the physical network of
        vlan type networks (and only that), so that specific combination
        is whitelisted before delegating to the generic check.
        """
        if (original_network.get(pnet.NETWORK_TYPE) ==
            c_utils.NsxVNetworkTypes.VLAN and
            validators.is_attr_set(
                attrs.get(pnet.PHYSICAL_NETWORK)) and
            not validators.is_attr_set(
                attrs.get(pnet.NETWORK_TYPE)) and
            not validators.is_attr_set(
                attrs.get(pnet.SEGMENTATION_ID))):
            return
        providernet._raise_if_updates_provider_attributes(attrs)

    def _update_vlan_network_dvs_ids(self, context, network,
                                     new_physical_network, az_dvs):
        """Update the dvs ids of a vlan provider network

        The new values will replace the old ones.
        Actions done in this function:
        - Create a backend network for each new dvs
        - Delete the backend networks for the old ones.
        - Return the relevant information in order to later also update
          the spoofguard policy, qos, network object and DB

        Returns:
        - dvs_list_changed True/False
        - dvs_pg_mappings - updated mapping of the elements dvs->moref
        """
        dvs_pg_mappings = {}

        current_dvs_ids = set(self._get_dvs_ids(
            network[pnet.PHYSICAL_NETWORK], az_dvs))
        new_dvs_ids = set(self._get_dvs_ids(
            new_physical_network, az_dvs))
        additional_dvs_ids = new_dvs_ids - current_dvs_ids
        removed_dvs_ids = current_dvs_ids - new_dvs_ids

        if not additional_dvs_ids and not removed_dvs_ids:
            # no changes in the list of DVS
            return False, dvs_pg_mappings

        self._convert_to_transport_zones_dict(network)
        # get the current mapping as in the DB
        db_mapping = nsx_db.get_nsx_network_mappings(
            context.session, network['id'])
        for db_map in db_mapping:
            dvs_pg_mappings[db_map.dvs_id] = db_map.nsx_id

        # delete old backend networks
        for dvs_id in removed_dvs_ids:
            nsx_id = dvs_pg_mappings.get(dvs_id)
            if nsx_id:
                #Note(asarfaty) This may fail if there is a vm deployed, but
                # since the delete is done offline we will not catch it here
                self._delete_backend_network(nsx_id, dvs_id)
                del dvs_pg_mappings[dvs_id]

        # create all the new backend networks
        for dvs_id in additional_dvs_ids:
            try:
                net_moref = self._create_vlan_network_at_backend(
                    dvs_id=dvs_id,
                    net_data=network)
            except nsx_exc.NsxPluginException:
                with excutils.save_and_reraise_exception():
                    # Delete VLAN networks on other DVSes if it
                    # fails to be created on one DVS and reraise
                    # the original exception.
                    for dvsmoref, netmoref in six.iteritems(
                            dvs_pg_mappings):
                        self._delete_backend_network(netmoref, dvsmoref)
            dvs_pg_mappings[dvs_id] = net_moref
        return True, dvs_pg_mappings

    def _update_network_validate_port_sec(self, context, net_id, net_attrs):
        """Warn when port-security is being disabled on a used network."""
        if (psec.PORTSECURITY in net_attrs and
            not net_attrs[psec.PORTSECURITY]):
            # check if there are compute ports on this network
            port_filters = {'network_id': [net_id],
                            'device_owner': ['compute:None']}
            compute_ports = self.get_ports(context, filters=port_filters)
            if compute_ports:
                LOG.warning("Disabling port-security on network %s would "
                            "require instance in the network to have VM "
                            "tools installed in order for security-groups "
                            "to function properly.", net_id)

    def update_network(self, context, id, network):
        """Update a network in neutron and propagate changes to NSX-V.

        Handles (in order): provider-attribute validation, port-security
        validation, physical-network (DVS list) changes for vlan networks,
        the neutron DB update, SpoofGuard policy update (with revert on
        failure), QoS policy changes, and backend port group renames.
        """
        net_attrs = network['network']
        orig_net = self.get_network(context, id)
        az_dvs = self._get_network_az_dvs_id(orig_net)
        self._raise_if_updates_provider_attributes(
            orig_net, net_attrs, az_dvs)
        if net_attrs.get("admin_state_up") is False:
            raise NotImplementedError(_("admin_state_up=False networks "
                                        "are not supported."))

        ext_net = self._get_network(context, id)
        if not ext_net.external:
            net_morefs = nsx_db.get_nsx_switch_ids(context.session, id)
        else:
            net_morefs = []
        backend_network = True if len(net_morefs) > 0 else False
        self._validate_network_qos(net_attrs, backend_network)

        # PortSecurity validation checks
        psec_update = (psec.PORTSECURITY in net_attrs and
                       orig_net[psec.PORTSECURITY] !=
                       net_attrs[psec.PORTSECURITY])
        if psec_update:
            self._update_network_validate_port_sec(context, id, net_attrs)

        # Check if the physical network of a vlan provider network was
        # updated
        updated_morefs = False
        if (net_attrs.get(pnet.PHYSICAL_NETWORK) and
            orig_net.get(pnet.NETWORK_TYPE) ==
            c_utils.NsxVNetworkTypes.VLAN):
            (updated_morefs,
             new_dvs_pg_mappings) = self._update_vlan_network_dvs_ids(
                context, orig_net, net_attrs[pnet.PHYSICAL_NETWORK], az_dvs)
            if updated_morefs:
                net_morefs = list(new_dvs_pg_mappings.values())

        with db_api.context_manager.writer.using(context):
            net_res = super(NsxVPluginV2, self).update_network(context, id,
                                                               network)
            self._extension_manager.process_update_network(context,
                                                           net_attrs,
                                                           net_res)
            self._process_network_port_security_update(
                context, net_attrs, net_res)
            self._process_l3_update(context, net_res, net_attrs)
            self._extend_network_dict_provider(context, net_res)
            if updated_morefs:
                # delete old mapping before recreating all
                nsx_db.delete_neutron_nsx_network_mapping(
                    session=context.session, neutron_id=id)
                # Save netmoref to dvs id mappings for VLAN network
                # type for future access.
                dvs_ids = []
                for dvs_id, netmoref in six.iteritems(new_dvs_pg_mappings):
                    nsx_db.add_neutron_nsx_network_mapping(
                        session=context.session,
                        neutron_id=id,
                        nsx_switch_id=netmoref,
                        dvs_id=dvs_id)
                    dvs_ids.append(dvs_id)
                all_dvs = ', '.join(sorted(dvs_ids))
                net_res[pnet.PHYSICAL_NETWORK] = all_dvs
                vlan_id = net_res.get(pnet.SEGMENTATION_ID)
                nsxv_db.update_network_binding_phy_uuid(
                    context.session, id,
                    net_res.get(pnet.NETWORK_TYPE),
                    vlan_id, all_dvs)

        # Updating SpoofGuard policy if exists, on failure revert to network
        # old state
        if (not ext_net.external and
            cfg.CONF.nsxv.spoofguard_enabled and updated_morefs):
            policy_id = nsxv_db.get_spoofguard_policy_id(context.session,
                                                         id)
            try:
                # Always use enabled spoofguard policy. ports with disabled
                # port security will be added to the exclude list
                self.nsx_v.vcns.update_spoofguard_policy(
                    policy_id, net_morefs, id, True)
            except Exception:
                with excutils.save_and_reraise_exception():
                    revert_update = db_utils.resource_fields(
                        orig_net, ['shared', psec.PORTSECURITY])
                    self._process_network_port_security_update(
                        context, revert_update, net_res)
                    super(NsxVPluginV2, self).update_network(
                        context, id, {'network': revert_update})

        # Handle QOS updates (Value can be None, meaning to delete the
        # current policy), or moref updates with an existing qos policy
        # NOTE(review): 'and' binds tighter than 'or' here, so the second
        # clause is not guarded by 'not ext_net.external' - confirm intended.
        if (not ext_net.external and
            (qos_consts.QOS_POLICY_ID in net_attrs) or
            (updated_morefs and orig_net.get(qos_consts.QOS_POLICY_ID))):
            # update the qos data
            qos_policy_id = (net_attrs[qos_consts.QOS_POLICY_ID]
                             if qos_consts.QOS_POLICY_ID in net_attrs
                             else orig_net.get(qos_consts.QOS_POLICY_ID))
            self._update_qos_on_backend_network(context, id, qos_policy_id)

            # attach the policy to the network in neutron DB
            qos_com_utils.update_network_policy_binding(
                context, id, qos_policy_id)
            net_res[qos_consts.QOS_POLICY_ID] = (
                qos_com_utils.get_network_policy_id(context, id))

        # Handle case of network name update - this only is relevant for
        # networks that we create - not portgroup providers
        if (net_attrs.get('name') and
            orig_net.get('name') != net_attrs.get('name') and
            (orig_net.get(pnet.NETWORK_TYPE) ==
             c_utils.NsxVNetworkTypes.VLAN or
             orig_net.get(pnet.NETWORK_TYPE) ==
             c_utils.NsxVNetworkTypes.FLAT)):
            # Only update networks created by plugin
            mappings = nsx_db.get_nsx_network_mappings(context.session, id)
            for mapping in mappings:
                network_name = self._get_vlan_network_name(net_res,
                                                           mapping.dvs_id)
                try:
                    self._vcm.update_port_groups_config(
                        mapping.dvs_id, id, mapping.nsx_id,
                        self._dvs.update_port_group_spec_name,
                        network_name)
                except Exception as e:
                    LOG.error('Unable to update name for net %(net_id)s. '
                              'Error: %(e)s',
                              {'net_id': id, 'e': e})

        return net_res

    def _validate_address_pairs(self, attrs, db_port):
        """Reject CIDR address pairs and MACs differing from the port's."""
        for ap in attrs[addr_apidef.ADDRESS_PAIRS]:
            # Check that the IP address is a subnet
            if len(ap['ip_address'].split('/')) > 1:
                msg = _('NSXv does not support CIDR as address pairs')
                raise n_exc.BadRequest(resource='address_pairs', msg=msg)
            # Check that the MAC address is the same as the port
            if ('mac_address' in ap and
                ap['mac_address'] != db_port['mac_address']):
                msg = _('Address pairs should have same MAC as the port')
                raise n_exc.BadRequest(resource='address_pairs', msg=msg)

    def _is_mac_in_use(self, context, network_id, mac_address):
        # Override this method as the backed doesn't support using the same
        # mac twice on any network, not just this specific network
        admin_ctx = context.elevated()
        return bool(admin_ctx.session.query(models_v2.Port).
                    filter(models_v2.Port.mac_address == mac_address).
                    count())

    @db_api.retry_db_errors
    def base_create_port(self, context, port):
        """Create a port in the DB only (no NSX backend side effects)."""
        created_port = super(NsxVPluginV2, self).create_port(context, port)
        self._extension_manager.process_create_port(
            context, port['port'], created_port)
        return created_port

    def _validate_extra_dhcp_options(self, opts):
        """Validate extra-dhcp-opt entries against NSX-V supported options.

        'classless-static-route' (option 121) values must be a
        '<net>,<ip>' pair; any other option must either be listed in
        vcns_const.SUPPORTED_DHCP_OPTIONS or be a numeric option < 255.
        """
        if not opts:
            return
        for opt in opts:
            opt_name = opt['opt_name']
            opt_val = opt['opt_value']
            if opt_name == 'classless-static-route':
                # separate validation for option121
                if opt_val is not None:
                    try:
                        net, ip = opt_val.split(',')
                    except Exception:
                        msg = (_("Bad value %(val)s for DHCP option "
                                 "%(name)s") % {'name': opt_name,
                                                'val': opt_val})
                        raise n_exc.InvalidInput(error_message=msg)
            elif opt_name not in vcns_const.SUPPORTED_DHCP_OPTIONS:
                try:
                    option = int(opt_name)
                except ValueError:
                    # non-numeric, unsupported name: force the rejection
                    # branch below
                    option = 255
                if option >= 255:
                    msg = (_("DHCP option %s is not supported") % opt_name)
                    LOG.error(msg)
                    raise n_exc.InvalidInput(error_message=msg)

    def _validate_port_qos(self, port):
        """Reject QoS policies set directly on ports (networks only)."""
        if validators.is_attr_set(port.get(qos_consts.QOS_POLICY_ID)):
            err_msg = (_("Cannot configure QOS directly on ports"))
            raise n_exc.InvalidInput(error_message=err_msg)

    def create_port(self, context, port):
        """Create a port in neutron and configure DHCP bindings on NSX.

        Validations and all DB work run inside one writer transaction;
        the DHCP edge static binding is configured afterwards, and the
        port is deleted again if that backend step fails.
        """
        port_data = port['port']
        dhcp_opts = port_data.get(ext_edo.EXTRADHCPOPTS)
        self._validate_extra_dhcp_options(dhcp_opts)
        self._validate_max_ips_per_port(port_data.get('fixed_ips', []),
                                        port_data.get('device_owner'))
        self._validate_port_qos(port_data)
        direct_vnic_type = self._validate_port_vnic_type(
            context, port_data, port_data['network_id'])

        with db_api.context_manager.writer.using(context):
            # First we allocate port in neutron database
            neutron_db = super(NsxVPluginV2, self).create_port(context,
                                                               port)
            self._extension_manager.process_create_port(
                context, port_data, neutron_db)
            # Port port-security is decided based on port's vnic_type and
            # ports network port-security state (unless explicitly requested
            # differently by the user).
            if not cfg.CONF.nsxv.spoofguard_enabled:
                port_security = False
            else:
                port_security = port_data.get(psec.PORTSECURITY)
                if validators.is_attr_set(port_security):
                    # 'direct' and 'direct-physical' vnic types ports
                    # requires port-security to be disabled.
                    if direct_vnic_type and port_security:
                        err_msg = _("Security features are not supported "
                                    "for ports with direct/direct-physical "
                                    "VNIC type.")
                        raise n_exc.InvalidInput(error_message=err_msg)
                elif direct_vnic_type:
                    # Implicitly disable port-security for direct vnic
                    # types.
                    port_security = False
                else:
                    # Inherit from the network's port-security setting.
                    port_security = self._get_network_security_binding(
                        context, neutron_db['network_id'])
            port_data[psec.PORTSECURITY] = port_security

            provider_sg_specified = (validators.is_attr_set(
                port_data.get(provider_sg.PROVIDER_SECURITYGROUPS)) and
                port_data[provider_sg.PROVIDER_SECURITYGROUPS] != [])
            has_security_groups = (
                self._check_update_has_security_groups(port))

            self._process_port_port_security_create(
                context, port_data, neutron_db)
            self._process_portbindings_create_and_update(
                context, port_data, neutron_db)

            # Update fields obtained from neutron db (eg: MAC address)
            port["port"].update(neutron_db)
            has_ip = self._ip_on_port(neutron_db)

            # allowed address pair checks
            attrs = port[port_def.RESOURCE_NAME]
            if self._check_update_has_allowed_address_pairs(port):
                if not port_security:
                    raise addr_exc.AddressPairAndPortSecurityRequired()
                self._validate_address_pairs(attrs, neutron_db)
            else:
                # remove ATTR_NOT_SPECIFIED
                attrs[addr_apidef.ADDRESS_PAIRS] = []

            # security group extension checks
            if has_ip and port_security:
                self._ensure_default_security_group_on_port(context, port)
                (sgids, ssgids) = self._get_port_security_groups_lists(
                    context, port)
            elif (has_security_groups or provider_sg_specified):
                LOG.error("Port has conflicting port security status and "
                          "security groups")
                raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups()
            else:
                sgids = ssgids = []
            self._process_port_create_security_group(context, port_data,
                                                     sgids)
            self._process_port_create_provider_security_group(context,
                                                              port_data,
                                                              ssgids)

            neutron_db[addr_apidef.ADDRESS_PAIRS] = (
                self._process_create_allowed_address_pairs(
                    context, neutron_db,
                    attrs.get(addr_apidef.ADDRESS_PAIRS)))
            self._process_port_create_extra_dhcp_opts(
                context, port_data, dhcp_opts)

            # MAC learning - only update DB. Can only update NSX when the
            # port exists - this is done via update
            if validators.is_attr_set(port_data.get(mac_ext.MAC_LEARNING)):
                if (((has_ip and port_security) or has_security_groups or
                     provider_sg_specified) and
                    port_data.get(mac_ext.MAC_LEARNING) is True):
                    err_msg = _("Security features are not supported for "
                                "mac learning.")
                    raise n_exc.InvalidInput(error_message=err_msg)
                self._create_mac_learning_state(context, port_data)
            elif mac_ext.MAC_LEARNING in port_data:
                # This is due to the fact that the default is
                # ATTR_NOT_SPECIFIED
                port_data.pop(mac_ext.MAC_LEARNING)

        try:
            # Configure NSX - this should not be done in the DB transaction
            # Configure the DHCP Edge service
            self._create_dhcp_static_binding(context, port_data)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception('Failed to create port')
                # Revert what we have created and raise the exception
                self.delete_port(context, port_data['id'])

        # this extra lookup is necessary to get the
        # latest db model for the extension functions
        port_model = self._get_port(context, port_data['id'])
        resource_extend.apply_funcs('ports', port_data, port_model)
        self._remove_provider_security_groups_from_list(port_data)

        kwargs = {'context': context, 'port': neutron_db}
        registry.notify(resources.PORT, events.AFTER_CREATE, self,
                        **kwargs)
        return port_data

    def _make_port_dict(self, port, fields=None, process_extensions=True):
        """Build a port dict, stripping provider security groups."""
        port_data = super(NsxVPluginV2, self)._make_port_dict(
            port, fields=fields,
            process_extensions=process_extensions)
        self._remove_provider_security_groups_from_list(port_data)
        return port_data

    def _get_port_subnet_mask(self, context, port):
        """Return the netmask of the port's first fixed-ip subnet.

        Returns None implicitly when the port has no fixed ips.
        """
        if (len(port['fixed_ips']) > 0 and
            'subnet_id' in port['fixed_ips'][0]):
            subnet_id = port['fixed_ips'][0]['subnet_id']
            subnet = self._get_subnet(context, subnet_id)
            return str(netaddr.IPNetwork(subnet.cidr).netmask)

    def _get_port_fixed_ip_addr(self, port):
        """Return the port's first fixed ip address, or None if absent."""
        if (len(port['fixed_ips']) > 0 and
            'ip_address' in port['fixed_ips'][0]):
            return port['fixed_ips'][0]['ip_address']

    def _count_no_sec_ports_for_device_id(self, context, device_id):
        """Find how many compute ports with this device ID and no security
        there are, so we can decide on adding / removing the device from
        the exclusion list
        """
        filters = {'device_id': [device_id]}
        device_ports = self.get_ports(context.elevated(), filters=filters)
        ports = [port for port in device_ports
                 if port['device_owner'].startswith('compute')]
        return len([p for p in ports
                    if validators.is_attr_set(
                        p.get(ext_vnic_idx.VNIC_INDEX)) and
                    not p[psec.PORTSECURITY]])

    def _add_vm_to_exclude_list(self, context, device_id, port_id):
        """Add the VM owning port_id to the NSX firewall exclude list.

        Only issues the backend call the first time a no-port-security
        port of this VM appears; older NSX versions (< 6.3.3) also need
        an explicit firewall sync afterwards.
        """
        if (self._vcm and
            cfg.CONF.nsxv.use_exclude_list):
            # first time for this vm (we expect the count to be 1 already
            # because the DB was already updated)
            if (self._count_no_sec_ports_for_device_id(
                    context, device_id) <= 1):
                vm_moref = self._vcm.get_vm_moref(device_id)
                if vm_moref is not None:
                    try:
                        LOG.info("Add VM %(dev)s to exclude list on "
                                 "behalf of port %(port)s: added to "
                                 "list",
                                 {"dev": device_id, "port": port_id})
                        self.nsx_v.vcns.add_vm_to_exclude_list(vm_moref)
                    except vsh_exc.RequestBad as e:
                        LOG.error("Failed to add vm %(device)s "
                                  "moref %(moref)s to exclude list: "
                                  "%(err)s",
                                  {'device': device_id,
                                   'moref': vm_moref,
                                   'err': e})
            else:
                LOG.info("Add VM %(dev)s to exclude list on behalf of "
                         "port %(port)s: VM already in list",
                         {"dev": device_id, "port": port_id})
            loose_ver = version.LooseVersion(
                self.nsx_v.vcns.get_version())
            if loose_ver < version.LooseVersion('6.3.3'):
                LOG.info("Syncing firewall")
                self.nsx_v.vcns.sync_firewall()

    def _remove_vm_from_exclude_list(self, context, device_id, port_id,
                                     expected_count=0):
        """Remove the VM from the NSX firewall exclude list if possible.

        The removal only happens once no other no-port-security ports of
        this device remain (expected_count allows for the caller's DB
        state: 0 if the port row is already gone, 1 otherwise).
        """
        if (self._vcm and
            cfg.CONF.nsxv.use_exclude_list):
            # No ports left in DB (expected count is 0 or 1 depending
            # on whether the DB was already updated),
            # So we can remove it from the backend exclude list
            if (self._count_no_sec_ports_for_device_id(
                    context, device_id) <= expected_count):
                vm_moref = self._vcm.get_vm_moref(device_id)
                if vm_moref is not None:
                    try:
                        LOG.info("Remove VM %(dev)s from exclude list on "
                                 "behalf of port %(port)s: removed from "
                                 "list",
                                 {"dev": device_id, "port": port_id})
                        self.nsx_v.vcns.delete_vm_from_exclude_list(
                            vm_moref)
                    except vsh_exc.RequestBad as e:
                        LOG.error("Failed to delete vm %(device)s "
                                  "moref %(moref)s from exclude list: "
                                  "%(err)s",
                                  {'device': device_id,
                                   'moref': vm_moref,
                                   'err': e})
            else:
                LOG.info("Remove VM %(dev)s from exclude list on behalf "
                         "of port %(port)s: other ports still in list",
                         {"dev": device_id, "port": port_id})

    def update_port(self, context, id, port):
        """Update a port, serializing on the port and (for compute ports)
        on the owning device so concurrent updates/deletes of the same
        VM's ports cannot interleave.
        """
        with locking.LockManager.get_lock('port-update-%s' % id):
            original_port = super(NsxVPluginV2, self).get_port(context, id)
            is_compute_port = self._is_compute_port(original_port)
            device_id = original_port['device_id']
            if is_compute_port and device_id:
                # Lock on the device ID to make sure we do not change/delete
                # ports of the same device at the same time
                with locking.LockManager.get_lock(
                        'port-device-%s' % device_id):
                    return self._update_port(context, id, port,
                                             original_port,
                                             is_compute_port, device_id)
            else:
                return self._update_port(context, id, port,
                                         original_port, is_compute_port,
                                         device_id)

    def _update_dhcp_address(self, context, network_id):
        """Recompute the DHCP address groups and push them to the edge."""
        with locking.LockManager.get_lock('dhcp-update-%s' % network_id):
            address_groups = self._create_network_dhcp_address_group(
                context, network_id)
            self.edge_manager.update_dhcp_edge_service(
                context, network_id, address_groups=address_groups)

    def _nsx_update_mac_learning(self, context, port):
        """Push the port's MAC-learning state to its backend port groups."""
        net_id = port['network_id']
        # default dvs for this network
        az = self.get_network_az_by_net_id(context, net_id)
        az_dvs_id = az.dvs_id

        # get the network moref/s from the db
        net_mappings = nsx_db.get_nsx_network_mappings(
            context.session, net_id)
        for mapping in net_mappings:
            dvs_id = mapping.dvs_id or az_dvs_id
            try:
                # Allow overriding security policy at the port level before
                # changing the per-port setting below.
                self._vcm.update_port_groups_config(
                    dvs_id, net_id, mapping.nsx_id,
                    self._vcm.update_port_group_security_policy, True)
            except Exception as e:
                LOG.error("Unable to update network security override "
                          "policy: %s", e)
                return
            self._vcm.update_port_security_policy(
                dvs_id, net_id, mapping.nsx_id,
                port['device_id'], port['mac_address'],
                port[mac_ext.MAC_LEARNING])

    def _update_port(self, context, id, port, original_port,
                     is_compute_port, device_id):
        """Inner implementation of update_port (called under locks).

        NOTE(review): this method continues past the end of this chunk;
        only the visible prefix is documented here.
        """
        attrs = port[port_def.RESOURCE_NAME]
        port_data = port['port']
        dhcp_opts = port_data.get(ext_edo.EXTRADHCPOPTS)
        self._validate_extra_dhcp_options(dhcp_opts)
        self._validate_port_qos(port_data)
        if addr_apidef.ADDRESS_PAIRS in attrs:
            self._validate_address_pairs(attrs, original_port)
        self._validate_max_ips_per_port(
            port_data.get('fixed_ips', []),
            port_data.get('device_owner', original_port['device_owner']))
        orig_has_port_security = (cfg.CONF.nsxv.spoofguard_enabled and
                                  original_port[psec.PORTSECURITY])

        port_mac_change = port_data.get('mac_address') is not None
        port_ip_change = port_data.get('fixed_ips') is not None
        device_owner_change = port_data.get('device_owner') is not None
        # We do not support updating the port ip and device owner together
        if port_ip_change and device_owner_change:
            msg = (_('Cannot set fixed ips and device owner together for '
                     'port %s') % original_port['id'])
            raise n_exc.BadRequest(resource='port', msg=msg)

        # Check if port security has changed
        port_sec_change = False
        has_port_security = orig_has_port_security
        if (psec.PORTSECURITY in port_data and
            port_data[psec.PORTSECURITY] !=
            original_port[psec.PORTSECURITY]):
            port_sec_change = True
            has_port_security = (cfg.CONF.nsxv.spoofguard_enabled and
                                 port_data[psec.PORTSECURITY])

            # We do not support modification of port security with other
            # parameters (only with security groups) to reduce some of
            # the complications
            if (len(port_data.keys()) > 2 or
                (ext_sg.SECURITYGROUPS not in port_data and
                 len(port_data.keys()) > 1)):
                msg = (_('Port security can only be set with '
                         'security-groups and no other attributes for '
                         'port %s') % original_port['id'])
                raise n_exc.BadRequest(resource='port', msg=msg)

        # Address pairs require port security
        if (not has_port_security and
            (original_port[addr_apidef.ADDRESS_PAIRS] or
             addr_apidef.ADDRESS_PAIRS in attrs)):
            msg = _('Address pairs require port security enabled')
            raise n_exc.BadRequest(resource='port', msg=msg)

        # TODO(roeyc): create a method '_process_vnic_index_update' from the
        # following code block
        # Process update for vnic-index
        vnic_idx = port_data.get(ext_vnic_idx.VNIC_INDEX)
        # Only set the vnic index for a compute VM
        if validators.is_attr_set(vnic_idx) and is_compute_port:
            # Update database only if vnic index was changed
            if original_port.get(ext_vnic_idx.VNIC_INDEX) != vnic_idx:
                self._set_port_vnic_index_mapping(
                    context, id, device_id, vnic_idx)
            vnic_id = self._get_port_vnic_id(vnic_idx, device_id)
            self._add_security_groups_port_mapping(
                context.session, vnic_id,
                original_port[ext_sg.SECURITYGROUPS] +
                original_port[provider_sg.PROVIDER_SECURITYGROUPS])
            if has_port_security:
                LOG.debug("Assigning vnic port fixed-ips: port %s, "
                          "vnic %s, with fixed-ips %s", id, vnic_id,
                          original_port['fixed_ips'])
                self._update_vnic_assigned_addresses(
                    context.session, original_port, vnic_id)
                if (cfg.CONF.nsxv.use_default_block_all and
                    not original_port[ext_sg.SECURITYGROUPS]):
                    self._add_member_to_security_group(
                        self.sg_container_id, vnic_id)
            else:
                # Add vm to the exclusion list, since it has no port
                # security
                self._add_vm_to_exclude_list(context, device_id, id)
            # if service insertion is enabled - add this vnic to the service
            # insertion security group
            if (self._si_handler.enabled and
                original_port[psec.PORTSECURITY]):
                self._add_member_to_security_group(self._si_handler.sg_id,
                                                   vnic_id)

        provider_sgs_specified = validators.is_attr_set(
            port_data.get(provider_sg.PROVIDER_SECURITYGROUPS))
        delete_provider_sg = provider_sgs_specified and (
            port_data[provider_sg.PROVIDER_SECURITYGROUPS] != [])
        delete_security_groups = (
            self._check_update_deletes_security_groups(port))
        has_security_groups = self._check_update_has_security_groups(port)
        # NOTE(review): truncated in this chunk - continues in the file.
        comp_owner_update =
('device_owner' in port_data and port_data['device_owner'].startswith('compute:')) direct_vnic_type = self._validate_port_vnic_type( context, port_data, original_port['network_id']) if direct_vnic_type and has_port_security: err_msg = _("Security features are not supported for " "ports with direct/direct-physical VNIC type.") raise n_exc.InvalidInput(error_message=err_msg) if (mac_ext.MAC_LEARNING in port_data and port_data[mac_ext.MAC_LEARNING] is True and has_port_security): err_msg = _("Security features are not supported for " "mac_learning.") raise n_exc.InvalidInput(error_message=err_msg) old_mac_learning_state = original_port.get(mac_ext.MAC_LEARNING) with db_api.context_manager.writer.using(context): ret_port = super(NsxVPluginV2, self).update_port( context, id, port) self._extension_manager.process_update_port( context, port_data, ret_port) self._process_portbindings_create_and_update( context, port_data, ret_port) # copy values over - except fixed_ips as # they've already been processed updates_fixed_ips = port['port'].pop('fixed_ips', []) ret_port.update(port['port']) has_ip = self._ip_on_port(ret_port) # checks that if update adds/modify security groups, # then port has ip and port-security if not (has_ip and has_port_security): if has_security_groups or provider_sgs_specified: LOG.error("Port has conflicting port security status and " "security groups") raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() if ((not delete_security_groups and original_port[ext_sg.SECURITYGROUPS]) or (not delete_provider_sg and original_port[provider_sg.PROVIDER_SECURITYGROUPS])): LOG.error("Port has conflicting port security status and " "security groups") raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() if delete_security_groups or has_security_groups: self.update_security_group_on_port(context, id, port, original_port, ret_port) # NOTE(roeyc): Should call this method only after # update_security_group_on_port was called. 
pvd_sg_changed = self._process_port_update_provider_security_group( context, port, original_port, ret_port) update_assigned_addresses = False if addr_apidef.ADDRESS_PAIRS in attrs: update_assigned_addresses = self.update_address_pairs_on_port( context, id, port, original_port, ret_port) self._update_extra_dhcp_opts_on_port(context, id, port, ret_port) new_mac_learning_state = ret_port.get(mac_ext.MAC_LEARNING) if (new_mac_learning_state is not None and old_mac_learning_state != new_mac_learning_state): self._update_mac_learning_state(context, id, new_mac_learning_state) # update port security in DB if changed if psec.PORTSECURITY in port['port']: self._process_port_port_security_update( context, port_data, ret_port) if comp_owner_update: # Create dhcp bindings, the port is now owned by an instance self._create_dhcp_static_binding(context, ret_port) elif port_mac_change or port_ip_change or dhcp_opts: owner = original_port['device_owner'] # If port IP has changed we should update according to device # owner if is_compute_port: # This is an instance port, so re-create DHCP entry self._delete_dhcp_static_binding(context, original_port) self._create_dhcp_static_binding(context, ret_port) elif owner == constants.DEVICE_OWNER_DHCP: # Update the ip of the dhcp port # Note: if there are no fixed ips this means that we are in # the process of deleting the subnet of this port. # In this case we should avoid updating the nsx backed as the # delete subnet will soon do it. 
if dhcp_opts or ret_port.get('fixed_ips'): self._update_dhcp_address(context, ret_port['network_id']) elif (owner == constants.DEVICE_OWNER_ROUTER_GW or owner == constants.DEVICE_OWNER_ROUTER_INTF): # This is a router port - update the edge appliance old_ip = self._get_port_fixed_ip_addr(original_port) new_ip = self._get_port_fixed_ip_addr(ret_port) if ((old_ip is not None or new_ip is not None) and (old_ip != new_ip)): if validators.is_attr_set(original_port.get('device_id')): router_id = original_port['device_id'] router_driver = self._find_router_driver(context, router_id) # subnet mask is needed for adding new ip to the vnic sub_mask = self._get_port_subnet_mask(context, ret_port) router_driver.update_router_interface_ip( context, router_id, original_port['id'], ret_port['network_id'], old_ip, new_ip, sub_mask) else: LOG.info('Not updating fixed IP on backend for ' 'device owner [%(dev_own)s] and port %(pid)s', {'dev_own': owner, 'pid': original_port['id']}) # Processing compute port update vnic_idx = original_port.get(ext_vnic_idx.VNIC_INDEX) if validators.is_attr_set(vnic_idx) and is_compute_port: vnic_id = self._get_port_vnic_id(vnic_idx, device_id) curr_sgids = ( original_port[provider_sg.PROVIDER_SECURITYGROUPS] + original_port[ext_sg.SECURITYGROUPS]) if ret_port['device_id'] != device_id: # Update change device_id - remove port-vnic association and # delete security-groups memberships for the vnic self._delete_security_groups_port_mapping( context.session, vnic_id, curr_sgids) if cfg.CONF.nsxv.spoofguard_enabled: if original_port[psec.PORTSECURITY]: try: self._remove_vnic_from_spoofguard_policy( context.session, original_port['network_id'], vnic_id) except Exception as e: LOG.error('Could not delete the spoofguard ' 'policy. 
Exception %s', e) # remove vm from the exclusion list when it is detached # from the device if it has no port security if not original_port[psec.PORTSECURITY]: self._remove_vm_from_exclude_list( context, device_id, id) self._delete_port_vnic_index_mapping(context, id) self._delete_dhcp_static_binding(context, original_port) # if service insertion is enabled - remove this vnic from the # service insertion security group if (self._si_handler.enabled and original_port[psec.PORTSECURITY]): self._remove_member_from_security_group( self._si_handler.sg_id, vnic_id) else: # port security enabled / disabled if port_sec_change: if has_port_security: LOG.debug("Assigning vnic port fixed-ips: port %s, " "vnic %s, with fixed-ips %s", id, vnic_id, original_port['fixed_ips']) self._update_vnic_assigned_addresses( context.session, original_port, vnic_id) # Remove vm from the exclusion list, since it now has # port security self._remove_vm_from_exclude_list(context, device_id, id) # add the vm to the service insertion if self._si_handler.enabled: self._add_member_to_security_group( self._si_handler.sg_id, vnic_id) elif cfg.CONF.nsxv.spoofguard_enabled: try: self._remove_vnic_from_spoofguard_policy( context.session, original_port['network_id'], vnic_id) except Exception as e: LOG.error('Could not delete the spoofguard ' 'policy. 
Exception %s', e) # Add vm to the exclusion list, since it has no port # security now self._add_vm_to_exclude_list(context, device_id, id) # remove the vm from the service insertion if self._si_handler.enabled: self._remove_member_from_security_group( self._si_handler.sg_id, vnic_id) # Update vnic with the newest approved IP addresses if (has_port_security and (updates_fixed_ips or update_assigned_addresses)): LOG.debug("Updating vnic port fixed-ips: port %s, vnic " "%s, fixed-ips %s", id, vnic_id, ret_port['fixed_ips']) self._update_vnic_assigned_addresses( context.session, ret_port, vnic_id) if not has_port_security and has_security_groups: LOG.warning("port-security is disabled on " "port %(id)s, " "VM tools must be installed on instance " "%(device_id)s for security-groups to " "function properly ", {'id': id, 'device_id': original_port['device_id']}) if (delete_security_groups or has_security_groups or pvd_sg_changed): # Update security-groups, # calculate differences and update vnic membership # accordingly. 
new_sgids = ( ret_port[provider_sg.PROVIDER_SECURITYGROUPS] + ret_port[ext_sg.SECURITYGROUPS]) self._update_security_groups_port_mapping( context.session, id, vnic_id, curr_sgids, new_sgids) if (cfg.CONF.nsxv.use_default_block_all and not ret_port[ext_sg.SECURITYGROUPS]): # If there are no security groups ensure that the # default is 'Drop All' self._add_member_to_security_group( self.sg_container_id, vnic_id) # update mac learning on NSX if self._vcm: mac_learning = self.get_mac_learning_state(context, id) if mac_learning is not None: try: self._nsx_update_mac_learning(context, ret_port) except Exception as e: LOG.error("Unable to update mac learning for port %s, " "reason: %s", id, e) kwargs = { 'context': context, 'port': ret_port, 'mac_address_updated': False, 'original_port': original_port, } registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs) return ret_port def _extend_get_port_dict_qos(self, context, port): # add the qos policy id from the DB (always None in this plugin) port[qos_consts.QOS_POLICY_ID] = qos_com_utils.get_port_policy_id( context, port['id']) def get_port(self, context, id, fields=None): port = super(NsxVPluginV2, self).get_port(context, id, fields=None) self._extend_get_port_dict_qos(context, port) return db_utils.resource_fields(port, fields) def delete_port(self, context, id, l3_port_check=True, nw_gw_port_check=True, force_delete_dhcp=False, allow_delete_internal=False): kwargs = { 'context': context, 'port_check': l3_port_check, 'port_id': id, } # Send delete port notification to any interested service plugin registry.notify(resources.PORT, events.BEFORE_DELETE, self, **kwargs) neutron_db_port = self.get_port(context, id) device_id = neutron_db_port['device_id'] is_compute_port = self._is_compute_port(neutron_db_port) if not allow_delete_internal: self._validate_internal_network( context, neutron_db_port['network_id']) if is_compute_port and device_id: # Lock on the device ID to make sure we do not change/delete # ports 
of the same device at the same time with locking.LockManager.get_lock( 'port-device-%s' % device_id): return self._delete_port(context, id, l3_port_check, nw_gw_port_check, neutron_db_port, force_delete_dhcp) else: return self._delete_port(context, id, l3_port_check, nw_gw_port_check, neutron_db_port, force_delete_dhcp) def _delete_port(self, context, id, l3_port_check, nw_gw_port_check, neutron_db_port, force_delete_dhcp=False): """Deletes a port on a specified Virtual Network. If the port contains a remote interface attachment, the remote interface is first un-plugged and then the port is deleted. :returns: None :raises: exception.PortInUse :raises: exception.PortNotFound :raises: exception.NetworkNotFound """ # if needed, check to see if this is a port owned by # a l3 router. If so, we should prevent deletion here if l3_port_check: self.prevent_l3_port_deletion(context, id) if (not force_delete_dhcp and neutron_db_port['device_owner'] in [constants.DEVICE_OWNER_DHCP]): msg = (_('Can not delete DHCP port %s') % neutron_db_port['id']) raise n_exc.BadRequest(resource='port', msg=msg) # If this port is attached to a device, remove the corresponding vnic # from all NSXv Security-Groups and the spoofguard policy port_index = neutron_db_port.get(ext_vnic_idx.VNIC_INDEX) if validators.is_attr_set(port_index): vnic_id = self._get_port_vnic_id(port_index, neutron_db_port['device_id']) sgids = neutron_db_port.get(ext_sg.SECURITYGROUPS) self._delete_security_groups_port_mapping( context.session, vnic_id, sgids) # if service insertion is enabled - remove this vnic from the # service insertion security group if self._si_handler.enabled and neutron_db_port[psec.PORTSECURITY]: self._remove_member_from_security_group(self._si_handler.sg_id, vnic_id) if (cfg.CONF.nsxv.spoofguard_enabled and neutron_db_port[psec.PORTSECURITY]): try: self._remove_vnic_from_spoofguard_policy( context.session, neutron_db_port['network_id'], vnic_id) except Exception as e: LOG.error('Could not delete 
the spoofguard policy. '
                    'Exception %s', e)
        # A compute port without port-security was placed on the NSX
        # exclude list at creation; remove the VM from it now.
        if (not neutron_db_port[psec.PORTSECURITY] and
            self._is_compute_port(neutron_db_port)):
            device_id = neutron_db_port['device_id']
            # Note that we expect to find 1 relevant port in the DB still
            # because this port was not yet deleted
            self._remove_vm_from_exclude_list(context, device_id, id,
                                              expected_count=1)
        self.disassociate_floatingips(context, id)
        with db_api.context_manager.writer.using(context):
            super(NsxVPluginV2, self).delete_port(context, id)
        self._delete_dhcp_static_binding(context, neutron_db_port)

    def base_delete_subnet(self, context, subnet_id):
        # DB-only subnet deletion, serialized under a plugin-wide lock so
        # concurrent base subnet operations do not race in the DB layer.
        with locking.LockManager.get_lock('neutron-base-subnet'):
            super(NsxVPluginV2, self).delete_subnet(context, subnet_id)

    def delete_subnet(self, context, id):
        subnet = self._get_subnet(context, id)
        filters = {'fixed_ips': {'subnet_id': [id]}}
        ports = self.get_ports(context, filters=filters)
        # NOTE: the subnet is deleted from the DB first, under lock. If it
        # overlaps with another subnet being created concurrently, the new
        # subnet could select this subnet's DHCP edge and send its "update
        # DHCP interface" backend call before this subnet's corresponding
        # "delete DHCP interface" call, leading to an overlap error from the
        # backend. The 'nsx-dhcp-edge-pool' lock (taken below) serializes
        # these edge operations.
network_id = subnet['network_id'] self._validate_internal_network(context, network_id) with locking.LockManager.get_lock(network_id): with db_api.context_manager.writer.using(context): self.base_delete_subnet(context, id) with locking.LockManager.get_lock('nsx-dhcp-edge-pool'): if subnet['enable_dhcp']: # There is only DHCP port available if len(ports) == 1: port = ports.pop() # This is done out of the transaction as it invokes # update_port which interfaces with the NSX self.ipam.delete_port(context, port['id']) # Delete the DHCP edge service filters = {'network_id': [network_id]} remaining_subnets = self.get_subnets(context, filters=filters) if len(remaining_subnets) == 0: self._cleanup_dhcp_edge_before_deletion( context, network_id) LOG.debug("Delete the DHCP service for network %s", network_id) self.edge_manager.delete_dhcp_edge_service(context, network_id) else: # Update address group and delete the DHCP port only self._update_dhcp_address(context, network_id) def _is_overlapping_reserved_subnets(self, subnet): """Return True if the subnet overlaps with reserved subnets. 
        For the V plugin we have a limitation that we should not use
        some reserved ranges like: 169.254.128.0/17 and 169.254.1.0/24
        """
        # translate the given subnet to a range object
        data = subnet['subnet']
        if data['cidr'] not in (constants.ATTR_NOT_SPECIFIED, None):
            # The VDR transit network is reserved in addition to the
            # statically reserved ranges.
            reserved_subnets = list(nsxv_constants.RESERVED_IPS)
            reserved_subnets.append(cfg.CONF.nsxv.vdr_transit_network)
            return edge_utils.is_overlapping_reserved_subnets(
                data['cidr'], reserved_subnets)
        return False

    def _get_dhcp_ip_addr_from_subnet(self, context, subnet_id):
        # Return the fixed IP of the DHCP port on the given subnet, or
        # None when no DHCP port with a fixed IP exists.
        dhcp_port_filters = {'fixed_ips': {'subnet_id': [subnet_id]},
                             'device_owner': [constants.DEVICE_OWNER_DHCP]}
        dhcp_ports = self.get_ports(context, filters=dhcp_port_filters)
        if dhcp_ports and dhcp_ports[0].get('fixed_ips'):
            return dhcp_ports[0]['fixed_ips'][0]['ip_address']

    def is_dhcp_metadata(self, context, subnet_id):
        # True when the subnet has DHCP enabled and this plugin serves
        # metadata through the DHCP edge proxy.
        try:
            subnet = self.get_subnet(context, subnet_id)
        except n_exc.SubnetNotFound:
            LOG.debug("subnet %s not found to determine its dhcp meta",
                      subnet_id)
            return False
        return bool(subnet['enable_dhcp'] and self.metadata_proxy_handler)

    def _validate_host_routes_input(self, subnet_input,
                                    orig_enable_dhcp=None,
                                    orig_host_routes=None):
        # Reject requests that combine host routes with disabled DHCP,
        # since host routes are delivered via DHCP options.
        s = subnet_input['subnet']
        request_host_routes = (validators.is_attr_set(s.get('host_routes')) and
                               s['host_routes'])
        clear_host_routes = (validators.is_attr_set(s.get('host_routes')) and
                             not s['host_routes'])
        request_enable_dhcp = s.get('enable_dhcp')
        if request_enable_dhcp is False:
            # Disabling DHCP while routes are requested or still present
            # (and not explicitly cleared) is invalid.
            if (request_host_routes or
                not clear_host_routes and orig_host_routes):
                err_msg = _("Can't disable DHCP while using host routes")
                raise n_exc.InvalidInput(error_message=err_msg)
        if request_host_routes:
            if not request_enable_dhcp and orig_enable_dhcp is False:
                err_msg = _("Host routes can only be supported when DHCP "
                            "is enabled")
                raise n_exc.InvalidInput(error_message=err_msg)

    def create_subnet_bulk(self, context, subnets):
        # Create subnets one by one; on any failure, roll back the ones
        # already created and re-raise.
        collection = "subnets"
        items = subnets[collection]
        new_subnets = []
        for item in items:
            try:
                s =
self.create_subnet(context, item) new_subnets.append(s) except Exception as e: LOG.error('Unable to create bulk subnets. Failed to ' 'create item %(item)s. Rolling back. ' 'Error: %(e)s', {'item': item, 'e': e}) for subnet in new_subnets: s_id = subnet['id'] try: self.delete_subnet(context, s_id) except Exception: LOG.error('Unable to delete subnet %s', s_id) raise return new_subnets def base_create_subnet(self, context, subnet): with locking.LockManager.get_lock('neutron-base-subnet'): return super(NsxVPluginV2, self).create_subnet(context, subnet) def create_subnet(self, context, subnet): """Create subnet on nsx_v provider network. If the subnet is created with DHCP enabled, and the network which the subnet is attached is not bound to an DHCP Edge, nsx_v will create the Edge and make sure the network is bound to the Edge """ self._validate_host_routes_input(subnet) if subnet['subnet']['enable_dhcp']: self._validate_external_subnet(context, subnet['subnet']['network_id']) data = subnet['subnet'] if (data.get('ip_version') == 6 or (data['cidr'] not in (constants.ATTR_NOT_SPECIFIED, None) and netaddr.IPNetwork(data['cidr']).version == 6)): err_msg = _("No support for DHCP for IPv6") raise n_exc.InvalidInput(error_message=err_msg) if self._is_overlapping_reserved_subnets(subnet): err_msg = _("The requested subnet contains reserved IP's") raise n_exc.InvalidInput(error_message=err_msg) with locking.LockManager.get_lock(subnet['subnet']['network_id']): s = self.base_create_subnet(context, subnet) self._extension_manager.process_create_subnet( context, subnet['subnet'], s) if s['enable_dhcp']: try: self._process_subnet_ext_attr_create( session=context.session, subnet_db=s, subnet_req=data) self._update_dhcp_service_with_subnet(context, s) except Exception: with excutils.save_and_reraise_exception(): self.base_delete_subnet(context, s['id']) return s def _process_subnet_ext_attr_create(self, session, subnet_db, subnet_req): # Verify if dns search domain/dhcp mtu for 
        # subnet are configured
        dns_search_domain = subnet_req.get(
            ext_dns_search_domain.DNS_SEARCH_DOMAIN)
        dhcp_mtu = subnet_req.get(
            ext_dhcp_mtu.DHCP_MTU)
        # Nothing to do when neither extended attribute was supplied.
        if (not validators.is_attr_set(dns_search_domain) and
            not validators.is_attr_set(dhcp_mtu)):
            return
        # Normalize unset attributes to None before persisting.
        if not validators.is_attr_set(dns_search_domain):
            dns_search_domain = None
        if not validators.is_attr_set(dhcp_mtu):
            dhcp_mtu = None
        sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes(
            session=session, subnet_id=subnet_db['id'])
        # Create a subnet extensions for subnet if it does not exist
        if not sub_binding:
            nsxv_db.add_nsxv_subnet_ext_attributes(
                session=session, subnet_id=subnet_db['id'],
                dns_search_domain=dns_search_domain,
                dhcp_mtu=dhcp_mtu)
        # Else update only if a new values for subnet extensions are provided
        elif (sub_binding.dns_search_domain != dns_search_domain or
              sub_binding.dhcp_mtu != dhcp_mtu):
            nsxv_db.update_nsxv_subnet_ext_attributes(
                session=session, subnet_id=subnet_db['id'],
                dns_search_domain=dns_search_domain,
                dhcp_mtu=dhcp_mtu)
        # Reflect the persisted values on the returned subnet dict.
        subnet_db['dns_search_domain'] = dns_search_domain
        subnet_db['dhcp_mtu'] = dhcp_mtu

    def _process_subnet_ext_attr_update(self, session, subnet_db, subnet_req):
        # Returns True when the extended attributes changed and the DHCP
        # edge configuration must be refreshed by the caller.
        update_dhcp_config = False
        # Update extended attributes for subnet
        if (ext_dns_search_domain.DNS_SEARCH_DOMAIN in subnet_req or
            ext_dhcp_mtu.DHCP_MTU in subnet_req):
            self._process_subnet_ext_attr_create(session,
                                                 subnet_db,
                                                 subnet_req)
            update_dhcp_config = True
        return update_dhcp_config

    def _update_routers_on_gateway_change(self, context, subnet_id,
                                          new_gateway):
        """Update all relevant router edges that the nexthop changed."""
        # Find every router with a gateway port on this subnet.
        port_filters = {'device_owner': [l3_db.DEVICE_OWNER_ROUTER_GW],
                        'fixed_ips': {'subnet_id': [subnet_id]}}
        intf_ports = self.get_ports(context.elevated(),
                                    filters=port_filters)
        router_ids = [port['device_id'] for port in intf_ports]
        for router_id in router_ids:
            router_driver = self._find_router_driver(context, router_id)
            router_driver._update_nexthop(context, router_id, new_gateway)

    def
update_subnet(self, context, id, subnet): # Lock the subnet so that no other conflicting action can occur on # the same subnet with locking.LockManager.get_lock('subnet-%s' % id): return self._safe_update_subnet(context, id, subnet) def _safe_update_subnet(self, context, id, subnet): s = subnet['subnet'] orig = self._get_subnet(context, id) gateway_ip = orig['gateway_ip'] enable_dhcp = orig['enable_dhcp'] orig_host_routes = orig['routes'] self._validate_host_routes_input(subnet, orig_enable_dhcp=enable_dhcp, orig_host_routes=orig_host_routes) subnet = super(NsxVPluginV2, self).update_subnet(context, id, subnet) self._extension_manager.process_update_subnet(context, s, subnet) update_dhcp_config = self._process_subnet_ext_attr_update( context.session, subnet, s) if (gateway_ip != subnet['gateway_ip'] or update_dhcp_config or set(orig['dns_nameservers']) != set(subnet['dns_nameservers']) or orig_host_routes != subnet['host_routes'] or enable_dhcp and not subnet['enable_dhcp']): # Need to ensure that all of the subnet attributes will be reloaded # when creating the edge bindings. Without adding this the original # subnet details are provided. 
context.session.expire_all() # Update the edge network_id = subnet['network_id'] self.edge_manager.update_dhcp_edge_bindings(context, network_id) # also update routers that use this subnet as their gateway if gateway_ip != subnet['gateway_ip']: self._update_routers_on_gateway_change(context, id, subnet['gateway_ip']) if enable_dhcp != subnet['enable_dhcp']: self._update_subnet_dhcp_status(subnet, context) return subnet @staticmethod @resource_extend.extends([subnet_def.COLLECTION_NAME]) def _extend_subnet_dict_extended_attributes(subnet_res, subnet_db): subnet_attr = subnet_db.get('nsxv_subnet_attributes') if subnet_attr: subnet_res['dns_search_domain'] = subnet_attr.dns_search_domain subnet_res['dhcp_mtu'] = subnet_attr.dhcp_mtu def _is_subnet_gw_a_vdr(self, context, subnet): filters = {'fixed_ips': {'subnet_id': [subnet['id']], 'ip_address': [subnet['gateway_ip']]}} ports = self.get_ports(context, filters=filters) if ports and ports[0].get('device_id'): rtr_id = ports[0].get('device_id') rtr = self.get_router(context, rtr_id) if rtr and rtr.get('distributed'): return rtr_id def _update_subnet_dhcp_status(self, subnet, context): network_id = subnet['network_id'] if subnet['enable_dhcp']: # Check if the network has one related dhcp edge resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] edge_binding = nsxv_db.get_nsxv_router_binding(context.session, resource_id) if edge_binding: # Create DHCP port port_dict = {'name': '', 'admin_state_up': True, 'network_id': network_id, 'tenant_id': subnet['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['id']}], 'device_owner': constants.DEVICE_OWNER_DHCP, 'device_id': n_utils.get_dhcp_agent_device_id( network_id, 'nsxv'), 'mac_address': constants.ATTR_NOT_SPECIFIED } self.create_port(context, {'port': port_dict}) # First time binding network with dhcp edge else: with locking.LockManager.get_lock(subnet['network_id']): self._update_dhcp_service_with_subnet(context, subnet) return else: # delete dhcp port filters = 
{'fixed_ips': {'subnet_id': [subnet['id']]}} ports = self.get_ports(context, filters=filters) for port in ports: if port["device_owner"] == constants.DEVICE_OWNER_DHCP: self.ipam.delete_port(context, port['id']) # Delete the DHCP edge service network_id = subnet['network_id'] filters = {'network_id': [network_id]} subnets = self.get_subnets(context, filters=filters) cleaup_edge = True for s in subnets: if s['enable_dhcp']: cleaup_edge = False if cleaup_edge: self._cleanup_dhcp_edge_before_deletion( context, network_id) LOG.debug("Delete the DHCP service for network %s", network_id) self.edge_manager.delete_dhcp_edge_service(context, network_id) return self._update_dhcp_address(context, network_id) def _get_conflict_network_ids_by_overlapping(self, context, subnets): with locking.LockManager.get_lock('nsx-networking'): conflict_network_ids = [] subnet_ids = [subnet['id'] for subnet in subnets] conflict_set = netaddr.IPSet( [subnet['cidr'] for subnet in subnets]) subnets_qry = context.session.query(models_v2.Subnet).all() subnets_all = [subnet for subnet in subnets_qry if subnet['id'] not in subnet_ids] for subnet in subnets_all: cidr_set = netaddr.IPSet([subnet['cidr']]) if cidr_set & conflict_set: conflict_network_ids.append(subnet['network_id']) return conflict_network_ids def _get_conflicting_networks_for_subnet(self, context, subnet): """Return a list if networks IDs conflicting with requested subnet The requested subnet cannot be placed on the same DHCP edge as the conflicting networks. A network will be conflicting with the current subnet if: 1. overlapping ips 2. provider networks with different physical network 3. flat provider network with any other flat network 4. 
           if not share_edges_between_tenants: networks of different tenants
        """
        subnet_net = subnet['network_id']
        subnet_tenant = subnet['tenant_id']
        # The DHCP for network with different physical network can not be used
        # The flat network should be located in different DHCP
        conflicting_networks = []
        all_networks = self.get_networks(context.elevated(),
                                         fields=['id', 'tenant_id'])
        phy_net = nsxv_db.get_network_bindings(context.session, subnet_net)
        if phy_net:
            binding_type = phy_net[0]['binding_type']
            phy_uuid = phy_net[0]['phy_uuid']
            for net_id in all_networks:
                p_net = nsxv_db.get_network_bindings(context.session,
                                                     net_id['id'])
                # Two FLAT networks always conflict; other provider
                # networks conflict when the physical network differs.
                if (p_net and binding_type == p_net[0]['binding_type'] and
                    binding_type == c_utils.NsxVNetworkTypes.FLAT):
                    conflicting_networks.append(net_id['id'])
                elif (p_net and phy_uuid != p_net[0]['phy_uuid']):
                    conflicting_networks.append(net_id['id'])
        # get conflicting networks of other tenants
        if not cfg.CONF.nsxv.share_edges_between_tenants:
            for another_net in all_networks:
                if (another_net['id'] != subnet_net and
                    another_net['tenant_id'] != subnet_tenant):
                    conflicting_networks.append(another_net['id'])
        # get all of the subnets on the network, there may be more than one
        filters = {'network_id': [subnet_net]}
        subnets = super(NsxVPluginV2, self).get_subnets(context.elevated(),
                                                        filters=filters)
        # Query all networks with overlap subnet
        if cfg.CONF.allow_overlapping_ips:
            conflicting_networks.extend(
                self._get_conflict_network_ids_by_overlapping(
                    context.elevated(), subnets))
        # De-duplicate before returning.
        conflicting_networks = list(set(conflicting_networks))
        return conflicting_networks

    def _get_edge_id_by_rtr_id(self, context, rtr_id):
        # Return the edge_id bound to the given router, or None when the
        # router has no edge binding.
        binding = nsxv_db.get_nsxv_router_binding(
            context.session, rtr_id)
        if binding:
            return binding['edge_id']

    def _get_edge_id_and_az_by_rtr_id(self, context, rtr_id):
        # Return (edge_id, availability_zone) from the router binding, or
        # (None, None) when the router has no edge binding.
        binding = nsxv_db.get_nsxv_router_binding(
            context.session, rtr_id)
        if binding:
            return binding['edge_id'], binding['availability_zone']
        return None, None

    def _update_dhcp_service_new_edge(self, context,
resource_id): edge_id, az_name = self._get_edge_id_and_az_by_rtr_id( context, resource_id) if edge_id: with locking.LockManager.get_lock(str(edge_id)): if self.metadata_proxy_handler: LOG.debug('Update metadata for resource %s az=%s', resource_id, az_name) md_proxy = self.get_metadata_proxy_handler(az_name) if md_proxy: md_proxy.configure_router_edge(context, resource_id) self.setup_dhcp_edge_fw_rules(context, self, resource_id) def _update_dhcp_service_with_subnet(self, context, subnet): network_id = subnet['network_id'] # Create DHCP port port_dict = {'name': '', 'admin_state_up': True, 'network_id': network_id, 'tenant_id': subnet['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['id']}], 'device_owner': constants.DEVICE_OWNER_DHCP, 'device_id': n_utils.get_dhcp_agent_device_id( network_id, 'nsxv'), 'mac_address': constants.ATTR_NOT_SPECIFIED } self.create_port(context, {'port': port_dict}) try: self.edge_manager.create_dhcp_edge_service(context, network_id, subnet) # Create all dhcp ports within the network self._update_dhcp_address(context, network_id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Failed to update DHCP for subnet %s", subnet['id']) def setup_dhcp_edge_fw_rules(self, context, plugin, router_id): rules = [] loose_ver = version.LooseVersion(self.nsx_v.vcns.get_version()) if loose_ver < version.LooseVersion('6.3.2'): # For these versions the raw icmp rule will not work due to # backend bug. Workaround: use applications, but since # application ids can change, we look them up by application name try: application_ids = plugin.nsx_v.get_icmp_echo_application_ids() rules = [{"name": "ICMPPing", "enabled": True, "action": "allow", "application": { "applicationId": application_ids}}] except Exception as e: LOG.error( 'Could not find ICMP Echo application. 
Exception %s', e) else: # For newer versions, we can use the raw icmp rule rules = [{"name": "ICMPPing", "enabled": True, "action": "allow", "protocol": "icmp", "icmp_type": 8}] if plugin.metadata_proxy_handler: rules += nsx_v_md_proxy.get_router_fw_rules() try: edge_utils.update_firewall(plugin.nsx_v, context, router_id, {'firewall_rule_list': rules}, allow_external=False) except Exception as e: # On failure, log that we couldn't configure the firewall on the # Edge appliance. This won't break the DHCP functionality LOG.error( 'Could not set up DHCP Edge firewall. Exception %s', e) def _create_network_dhcp_address_group(self, context, network_id): """Create dhcp address group for subnets attached to the network.""" filters = {'network_id': [network_id], 'device_owner': [constants.DEVICE_OWNER_DHCP]} ports = self.get_ports(context, filters=filters) filters = {'network_id': [network_id], 'enable_dhcp': [True]} subnets = self.get_subnets(context, filters=filters) address_groups = [] for subnet in subnets: address_group = {} ip_found = False for port in ports: fixed_ips = port['fixed_ips'] for fip in fixed_ips: s_id = fip['subnet_id'] ip_addr = fip['ip_address'] if s_id == subnet['id'] and self._is_valid_ip(ip_addr): address_group['primaryAddress'] = ip_addr ip_found = True break if ip_found: net = netaddr.IPNetwork(subnet['cidr']) address_group['subnetPrefixLength'] = str(net.prefixlen) address_groups.append(address_group) LOG.debug("Update the DHCP address group to %s", address_groups) return address_groups def _validate_router_size(self, router): # Check if router-size is specified. router-size can only be specified # for an exclusive non-distributed router; else raise a BadRequest # exception. 
        r = router['router']
        if validators.is_attr_set(r.get(ROUTER_SIZE)):
            if r.get('router_type') == nsxv_constants.SHARED:
                msg = _("Cannot specify router-size for shared router")
                raise n_exc.BadRequest(resource="router", msg=msg)
            elif r.get('distributed') is True:
                msg = _("Cannot specify router-size for distributed router")
                raise n_exc.BadRequest(resource="router", msg=msg)
        else:
            # Apply the configured default size for exclusive routers
            if r.get('router_type') == nsxv_constants.EXCLUSIVE:
                r[ROUTER_SIZE] = cfg.CONF.nsxv.exclusive_router_appliance_size

    def _get_router_flavor_profile(self, context, flavor_id):
        """Return the (single) service profile of an L3 flavor.

        Raises BadRequest if the flavors plugin is missing,
        InvalidFlavorServiceType if the flavor is not an L3 flavor, and
        FlavorDisabled if it is disabled. Returns None when the flavor has
        no service profiles.
        """
        flv_plugin = directory.get_plugin(plugin_const.FLAVORS)
        if not flv_plugin:
            msg = _("Flavors plugin not found")
            raise n_exc.BadRequest(resource="router", msg=msg)

        # Will raise FlavorNotFound if doesn't exist
        fl_db = flavors_plugin.FlavorsPlugin.get_flavor(
            flv_plugin, context, flavor_id)

        if fl_db['service_type'] != plugin_const.L3:
            raise n_exc.InvalidFlavorServiceType(
                service_type=fl_db['service_type'])

        if not fl_db['enabled']:
            raise flav_exc.FlavorDisabled()

        # get the profile (Currently only 1 is supported, so take the first)
        if not fl_db['service_profiles']:
            return
        profile_id = fl_db['service_profiles'][0]

        return flavors_plugin.FlavorsPlugin.get_service_profile(
            flv_plugin, context, profile_id)

    def _get_flavor_metainfo_from_profile(self, flavor_id, flavor_profile):
        """Parse a flavor profile's metainfo string into a dict.

        The metainfo is stored with single quotes; normalize to double
        quotes so it parses as JSON. Returns {} on any parse failure or
        when the parsed value is not a dictionary.
        """
        if not flavor_profile:
            return {}
        metainfo_string = flavor_profile.get('metainfo').replace("'", "\"")
        try:
            metainfo = jsonutils.loads(metainfo_string)
            if not isinstance(metainfo, dict):
                LOG.warning("Skipping router flavor %(flavor)s metainfo "
                            "[%(metainfo)s]: expected a dictionary",
                            {'flavor': flavor_id,
                             'metainfo': metainfo_string})
                metainfo = {}
        except ValueError as e:
            LOG.warning("Error reading router flavor %(flavor)s metainfo "
                        "[%(metainfo)s]: %(error)s",
                        {'flavor': flavor_id,
                         'metainfo': metainfo_string,
                         'error': e})
            metainfo = {}
        return metainfo

    def get_flavor_metainfo(self, context, flavor_id):
        """Retrieve metainfo from first profile of specified flavor"""
        flavor_profile = self._get_router_flavor_profile(context, flavor_id)
        return self._get_flavor_metainfo_from_profile(flavor_id,
                                                      flavor_profile)

    def _get_router_config_from_flavor(self, context, router):
        """Validate the router flavor and initialize router data

        Validate that the flavor is legit, and that contradicting
        configuration does not exist. Also update the router data to
        reflect the selected flavor.
        """
        if not validators.is_attr_set(router.get('flavor_id')):
            return
        metainfo = self.get_flavor_metainfo(context, router['flavor_id'])

        # Go over the attributes of the metainfo
        allowed_keys = [ROUTER_SIZE, 'router_type', 'distributed',
                        az_def.AZ_HINTS]
        # This info will be used later on
        # and is not part of standard router config
        future_use_keys = ['syslog']
        for k, v in metainfo.items():
            if k in allowed_keys:
                # special case for availability zones hints which are an array
                if k == az_def.AZ_HINTS:
                    if not isinstance(v, list):
                        v = [v]
                    # The default az hints is an empty array
                    if (validators.is_attr_set(router.get(k)) and
                        len(router[k]) > 0):
                        msg = (_("Cannot specify %s if the flavor profile "
                                 "defines it") % k)
                        raise n_exc.BadRequest(resource="router", msg=msg)
                elif validators.is_attr_set(router.get(k)) and router[k] != v:
                    msg = _("Cannot specify %s if the flavor defines it") % k
                    raise n_exc.BadRequest(resource="router", msg=msg)
                # Legal value
                router[k] = v
            elif k in future_use_keys:
                pass
            else:
                LOG.warning("Skipping router flavor metainfo [%(k)s:%(v)s]"
                            ":unsupported field",
                            {'k': k, 'v': v})

    def _process_extra_attr_router_create(self, context, router_db, r):
        # Persist any extra-attribute values supplied in the request
        for extra_attr in l3_attrs_db.get_attr_info().keys():
            if (extra_attr in r and
                validators.is_attr_set(r.get(extra_attr))):
                self.set_extra_attr_value(context, router_db,
                                          extra_attr, r[extra_attr])

    def create_router(self, context, router, allow_metadata=True):
        """Create a neutron router and its NSX edge backend resources."""
        r = router['router']
        self._get_router_config_from_flavor(context, r)
        self._decide_router_type(context, r)
        self._validate_router_size(router)
        self._validate_availability_zones_in_obj(context, 'router', r)
        # First extract the gateway info in case of updating
        # gateway before edge is deployed.
        # TODO(berlin): admin_state_up and routes update
        gw_info = self._extract_external_gw(context, router)
        lrouter = super(NsxVPluginV2, self).create_router(context, router)
        with db_api.context_manager.writer.using(context):
            router_db = self._get_router(context, lrouter['id'])
            self._process_extra_attr_router_create(context, router_db, r)
            self._process_nsx_router_create(context, router_db, r)
            self._process_router_flavor_create(context, router_db, r)
        with db_api.context_manager.reader.using(context):
            lrouter = super(NsxVPluginV2, self).get_router(context,
                                                           lrouter['id'])
        try:
            router_driver = self._get_router_driver(context, router_db)
            if router_driver.get_type() == nsxv_constants.EXCLUSIVE:
                # Exclusive routers honor the requested appliance size
                router_driver.create_router(
                    context, lrouter,
                    appliance_size=r.get(ROUTER_SIZE),
                    allow_metadata=(allow_metadata and
                                    self.metadata_proxy_handler))
            else:
                router_driver.create_router(
                    context, lrouter,
                    allow_metadata=(allow_metadata and
                                    self.metadata_proxy_handler))
            if gw_info != constants.ATTR_NOT_SPECIFIED and gw_info:
                self._update_router_gw_info(
                    context, lrouter['id'], gw_info)
        except Exception:
            LOG.exception("Failed to create router %s", router)
            # Roll back the neutron router if backend creation failed
            with excutils.save_and_reraise_exception():
                self.delete_router(context, lrouter['id'])

        # re-read the router with the updated data, and return it
        with db_api.context_manager.reader.using(context):
            return self.get_router(context, lrouter['id'])

    def _validate_router_migration(self, context, router_id,
                                   new_router_type, router):
        """Reject a migration to 'shared' when static routes exist."""
        if new_router_type == 'shared':
            # shared router cannot have static routes
            # verify that the original router did not have static routes
            err_msg = _('Unable to create a shared router with static routes')
            routes = self._get_extra_routes_by_router_id(context, router_id)
            if len(routes) > 0:
                raise n_exc.InvalidInput(error_message=err_msg)

            # verify that the updated router does not have static routes
            if (validators.is_attr_set(router.get("routes")) and
                len(router['routes']) > 0):
                raise n_exc.InvalidInput(error_message=err_msg)

    def update_router(self, context, router_id, router):
        """Update a router, serialized by a per-router lock."""
        with locking.LockManager.get_lock('router-%s' % router_id):
            return self._safe_update_router(context, router_id, router)

    def _safe_update_router(self, context, router_id, router):
        """Update a router; handles router-type migration.

        Called with the per-router lock held (see update_router). A change
        of 'router_type' detaches the router from its old driver/pool and
        attaches it to the new one; toggling 'distributed' is rejected.
        """
        # Validate that the gateway information is relevant
        gw_info = self._extract_external_gw(context, router, is_extract=False)

        # Toggling the distributed flag is not supported
        if 'distributed' in router['router']:
            r = self.get_router(context, router_id)
            if r['distributed'] != router['router']['distributed']:
                err_msg = _('Unable to update distributed mode')
                raise n_exc.InvalidInput(error_message=err_msg)

        # Toggling router type is supported only for non-distributed router
        elif 'router_type' in router['router']:
            r = self.get_router(context, router_id)
            if r.get('router_type') != router['router']['router_type']:
                if r["distributed"]:
                    err_msg = _('Unable to update distributed mode')
                    raise n_exc.InvalidInput(error_message=err_msg)
                else:
                    # should migrate the router because its type changed
                    new_router_type = router['router']['router_type']
                    self._validate_router_size(router)
                    self._validate_router_migration(
                        context, router_id, new_router_type, r)

                    # remove the router from the old pool, and free resources
                    old_router_driver = \
                        self._router_managers.get_tenant_router_driver(
                            context, r['router_type'])
                    old_router_driver.detach_router(context, router_id,
                                                    router)

                    # update the router-type
                    with db_api.context_manager.writer.using(context):
                        router_db = self._get_router(context, router_id)
                        self._process_nsx_router_create(
                            context, router_db, router['router'])

                    # update availability zone
                    router['router']['availability_zone_hints'] = r.get(
                        'availability_zone_hints')

                    # add the router to the new pool
                    appliance_size = router['router'].get(ROUTER_SIZE)
                    new_router_driver = \
                        self._router_managers.get_tenant_router_driver(
                            context, new_router_type)
                    new_router_driver.attach_router(
                        context, router_id, router,
                        appliance_size=appliance_size)
                    # continue to update the router with the new driver
                    # but remove the router-size that was already updated
                    router['router'].pop(ROUTER_SIZE, None)

        if (validators.is_attr_set(gw_info) and
            not gw_info.get('enable_snat',
                            cfg.CONF.enable_snat_by_default)):
            # With SNAT disabled, interface subnets must share the gateway
            # network's address scope
            router_ports = self._get_router_interfaces(context, router_id)
            for port in router_ports:
                for fip in port['fixed_ips']:
                    self._validate_address_scope_for_router_interface(
                        context.elevated(), router_id,
                        gw_info['network_id'], fip['subnet_id'])

        router_driver = self._find_router_driver(context, router_id)
        return router_driver.update_router(context, router_id, router)

    def _check_router_in_use(self, context, router_id):
        """Raise if the router still has FIPs, interfaces, or is internal."""
        with db_api.context_manager.reader.using(context):
            # Ensure that the router is not used
            router_filter = {'router_id': [router_id]}
            fips = self.get_floatingips_count(context.elevated(),
                                              filters=router_filter)
            if fips:
                raise l3_exc.RouterInUse(router_id=router_id)

            device_filter = {'device_id': [router_id],
                             'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
            ports = self.get_ports_count(context.elevated(),
                                         filters=device_filter)
            if ports:
                raise l3_exc.RouterInUse(router_id=router_id)

            if nsxv_db.get_nsxv_internal_edge_by_router(
                context.elevated().session, router_id):
                msg = _("Cannot delete internal router %s") % router_id
                raise n_exc.InvalidInput(error_message=msg)

    def delete_router(self, context, id):
        """Delete a router after verifying it is unused."""
        self._check_router_in_use(context, id)
        router_driver = self._find_router_driver(context, id)
        # Clear vdr's gw relative components if the router has gw info
        if router_driver.get_type() == "distributed":
            router = self.get_router(context, id)
            if router.get(l3_apidef.EXTERNAL_GW_INFO):
                try:
                    router_driver._update_router_gw_info(context, id, {})
                except Exception as e:
                    # Do not fail router deletion
                    LOG.error("Failed to remove router %(rtr)s GW info "
                              "before deletion: %(e)s",
                              {'e': e, 'rtr': id})
        super(NsxVPluginV2, self).delete_router(context, id)
        router_driver.delete_router(context, id)

    def get_availability_zone_name_by_edge(self, context, edge_id):
        """Return the AZ name for an edge, or the default AZ name."""
        az_name = nsxv_db.get_edge_availability_zone(
            context.session, edge_id)
        if az_name:
            return az_name
        # fallback
        return nsx_az.DEFAULT_NAME

    def get_network_availability_zones(self, net_db):
        # Runs under an admin context: AZ data is not tenant-visible per se
        context = n_context.get_admin_context()
        return self._get_network_availability_zones(context, net_db)

    def _get_network_availability_zones(self, context, net_db):
        """Return availability zones which a network belongs to.

        Return only the actual az the dhcp edge is deployed on.
        If there is no edge - the availability zones list is empty.
        """
        resource_id = (vcns_const.DHCP_EDGE_PREFIX + net_db["id"])[:36]
        dhcp_edge_binding = nsxv_db.get_nsxv_router_binding(
            context.session, resource_id)
        if dhcp_edge_binding:
            return [dhcp_edge_binding['availability_zone']]
        return []

    def get_router_availability_zones(self, router):
        """Return availability zones which a router belongs to.

        Return only the actual az the router edge is deployed on.
        If there is no edge - the availability zones list is empty.
        """
        context = n_context.get_admin_context()
        binding = nsxv_db.get_nsxv_router_binding(
            context.session, router['id'])
        if binding:
            return [binding['availability_zone']]
        return []

    def _process_router_flavor_create(self, context, router_db, r):
        """Update the router DB structure with the flavor ID upon creation
        """
        if validators.is_attr_set(r.get('flavor_id')):
            router_db.flavor_id = r['flavor_id']

    @staticmethod
    @resource_extend.extends([l3_apidef.ROUTERS])
    def add_flavor_id(router_res, router_db):
        # Expose the flavor id on the router API resource
        router_res['flavor_id'] = router_db['flavor_id']

    def get_router(self, context, id, fields=None):
        """Return a router, augmented with NSX-V specific attributes."""
        router = super(NsxVPluginV2, self).get_router(context, id, fields)
        # router_type is meaningless for distributed routers
        if router.get("distributed") and 'router_type' in router:
            del router['router_type']
        if router.get("router_type") == nsxv_constants.EXCLUSIVE:
            binding = nsxv_db.get_nsxv_router_binding(context.session,
                                                      router.get("id"))
            if binding:
                router[ROUTER_SIZE] = binding.get("appliance_size")
            else:
                LOG.error("No binding for router %s", id)
        return router

    def _get_external_attachment_info(self, context, router):
        """Return (ip, netmask set, nexthop) of the router's gw port."""
        gw_port = router.gw_port
        ipaddress = None
        netmask = None
        nexthop = None

        if gw_port:
            # TODO(berlin): we can only support gw port with one fixed ip at
            # present.
            if gw_port.get('fixed_ips'):
                ipaddress = gw_port['fixed_ips'][0]['ip_address']
                subnet_id = gw_port['fixed_ips'][0]['subnet_id']
                subnet = self.get_subnet(context.elevated(), subnet_id)
                nexthop = subnet['gateway_ip']

            network_id = gw_port.get('network_id')
            if network_id:
                ext_net = self._get_network(context, network_id)
                if not ext_net.external:
                    msg = (_("Network '%s' is not a valid external "
                             "network") % network_id)
                    raise n_exc.BadRequest(resource='router', msg=msg)
                if ext_net.subnets:
                    # netmask is the set of CIDRs of all external subnets
                    netmask = set([str(ext_subnet.cidr)
                                   for ext_subnet in ext_net.subnets])

        return (ipaddress, netmask, nexthop)

    def _add_network_info_for_routes(self, context, routes, ports):
        """Annotate static routes with the network their nexthop is on.

        Mutates each route dict in place: adds 'network_id' and, for
        external networks, 'external': True.
        """
        for route in routes:
            for port in ports:
                for ip in port['fixed_ips']:
                    subnet = self.get_subnet(context.elevated(),
                                             ip['subnet_id'])
                    if netaddr.all_matching_cidrs(
                        route['nexthop'], [subnet['cidr']]):
                        net = self.get_network(context.elevated(),
                                               subnet['network_id'])
                        route['network_id'] = net['id']
                        if net.get(extnet_apidef.EXTERNAL):
                            route['external'] = True

    def _prepare_edge_extra_routes(self, context, router_id):
        # Build the extra-routes list, annotated with network info
        routes = self._get_extra_routes_by_router_id(context, router_id)
        filters = {'device_id': [router_id]}
        ports = self.get_ports(context, filters)
        self._add_network_info_for_routes(context, routes, ports)
        return routes

    def _update_routes(self, context, router_id, nexthop):
        # Push the router's static routes to the edge appliance
        routes = self._prepare_edge_extra_routes(context, router_id)
        edge_utils.update_routes(self.nsx_v, context, router_id,
                                 routes, nexthop)

    def _update_current_gw_port(self, context, router_id, router, ext_ips):
        """Override this function in order not to call plugins' update_port

        since the actual backend work was already done by the router driver,
        and it may cause a deadlock.
        """
        port_data = {'fixed_ips': ext_ips}
        updated_port = super(NsxVPluginV2, self).update_port(
            context, router.gw_port['id'], {'port': port_data})
        self._extension_manager.process_update_port(
            context, port_data, updated_port)
        registry.notify(resources.ROUTER_GATEWAY,
                        events.AFTER_UPDATE,
                        self._update_current_gw_port,
                        context=context,
                        router_id=router_id,
                        router=router,
                        network_id=router.gw_port.network_id,
                        updated_port=updated_port)
        context.session.expire(router.gw_port)

    def _update_router_gw_info(self, context, router_id, info,
                               is_routes_update=False,
                               force_update=False):
        """Update the router's external gateway via its router driver.

        Validates SNAT/floating-IP consistency, picks a gateway subnet for
        multi-external-subnet networks when no fixed IPs were requested,
        and clears the gateway on backend (VcnsApi) failure before
        re-raising.
        """
        router_driver = self._find_router_driver(context, router_id)
        if info:
            try:
                ext_ips = info.get('external_fixed_ips')
                network_id = info.get('network_id')
                router_db = self._get_router(context, router_id)
                org_enable_snat = router_db.enable_snat
                # Ensure that a router cannot have SNAT disabled if there are
                # floating IP's assigned
                if ('enable_snat' in info and
                    org_enable_snat != info.get('enable_snat') and
                    info.get('enable_snat') is False and
                    self.router_gw_port_has_floating_ips(context,
                                                         router_id)):
                    msg = _("Unable to set SNAT disabled. Floating IPs "
                            "assigned.")
                    raise n_exc.InvalidInput(error_message=msg)

                # for multiple external subnets support, we need to set gw
                # port first on subnet which has gateway. If can't get one
                # subnet with gateway or allocate one available ip from
                # subnet, we would just enter normal logic and admin should
                # exactly know what he did.
                if (not ext_ips and network_id and
                    (not router_db.gw_port or
                     not router_db.gw_port.get('fixed_ips'))):
                    net_id_filter = {'network_id': [network_id]}
                    subnets = self.get_subnets(context,
                                               filters=net_id_filter)
                    # Only pick a fixed subnet when there are multiple
                    # subnets and none of them is IPv6 auto-address
                    fixed_subnet = True
                    if len(subnets) <= 1:
                        fixed_subnet = False
                    else:
                        for subnet in subnets:
                            if ipv6_utils.is_auto_address_subnet(subnet):
                                fixed_subnet = False
                    if fixed_subnet:
                        # Try each subnet with a gateway until IP allocation
                        # succeeds
                        for subnet in subnets:
                            if not subnet['gateway_ip']:
                                continue
                            try:
                                info['external_fixed_ips'] = [{
                                    'subnet_id': subnet['id']}]
                                return router_driver._update_router_gw_info(
                                    context, router_id, info,
                                    is_routes_update=is_routes_update)
                            except n_exc.IpAddressGenerationFailure:
                                del info['external_fixed_ips']
                        LOG.warning("Cannot get one subnet with gateway "
                                    "to allocate one available gw ip")
                router_driver._update_router_gw_info(
                    context, router_id, info,
                    is_routes_update=is_routes_update,
                    force_update=force_update)
            except vsh_exc.VcnsApiException:
                with excutils.save_and_reraise_exception():
                    LOG.error("Failed to update gw_info %(info)s on "
                              "router %(router_id)s",
                              {'info': str(info),
                               'router_id': router_id})
                    # Best effort cleanup: clear the gateway on the backend
                    router_driver._update_router_gw_info(
                        context, router_id, {},
                        is_routes_update=is_routes_update,
                        force_update=force_update)
        else:
            router_driver._update_router_gw_info(
                context, router_id, info,
                is_routes_update=is_routes_update,
                force_update=force_update)

    def _get_internal_network_ids_by_router(self, context, router_id):
        """Return the distinct network ids of the router's interfaces."""
        ports_qry = context.session.query(models_v2.Port)
        intf_ports = ports_qry.filter_by(
            device_id=router_id,
            device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF).all()
        intf_net_ids = list(set([port['network_id']
                                 for port in intf_ports]))
        return intf_net_ids

    def _get_address_groups(self, context, router_id, network_id):
        """Build edge address groups for the router's ports on a network."""
        address_groups = []
        ports = self._get_router_interface_ports_by_network(
            context, router_id, network_id)
        for port in ports:
            address_group = {}
            gateway_ip = port['fixed_ips'][0]['ip_address']
            subnet = self.get_subnet(context,
                                     port['fixed_ips'][0]['subnet_id'])
            prefixlen = str(netaddr.IPNetwork(subnet['cidr']).prefixlen)
            address_group['primaryAddress'] = gateway_ip
            address_group['subnetPrefixLength'] = prefixlen
            address_groups.append(address_group)
        return address_groups

    def _get_nat_rules(self, context, router):
        """Compute (snat, dnat) rule lists for a router.

        DNAT rules map each associated floating IP to its fixed IP. SNAT
        rules translate each IPv4 subnet CIDR to the gateway IP, skipping
        subnets that share the gateway network's address scope.
        """
        fip_qry = context.session.query(l3_db_models.FloatingIP)
        fip_db = fip_qry.filter_by(router_id=router['id']).all()

        snat = []
        dnat = [{'dst': fip.floating_ip_address,
                 'translated': fip.fixed_ip_address}
                for fip in fip_db if fip.fixed_port_id]

        gw_port = router.gw_port
        if gw_port and gw_port.get('fixed_ips') and router.enable_snat:
            snat_ip = gw_port['fixed_ips'][0]['ip_address']
            subnets = self._find_router_subnets(context.elevated(),
                                                router['id'])
            for subnet in subnets:
                # Do not build NAT rules for v6
                if subnet.get('ip_version') == 6:
                    continue
                # if the subnets address scope is the same as the gateways:
                # no need for SNAT
                gw_address_scope = self._get_network_address_scope(
                    context.elevated(), gw_port['network_id'])
                subnet_address_scope = self._get_subnetpool_address_scope(
                    context.elevated(), subnet['subnetpool_id'])
                if (gw_address_scope and
                    gw_address_scope == subnet_address_scope):
                    LOG.info("No need for SNAT rule for router %(router)s "
                             "and subnet %(subnet)s because they use the "
                             "same address scope %(addr_scope)s.",
                             {'router': router['id'],
                              'subnet': subnet['id'],
                              'addr_scope': gw_address_scope})
                    continue

                snat.append({
                    'src': subnet['cidr'],
                    'translated': snat_ip,
                    'vnic_index': vcns_const.EXTERNAL_VNIC_INDEX,
                })
        return (snat, dnat)

    def _get_nosnat_subnets_fw_rules(self, context, router):
        """Open edge firewall holes for nosnat subnets to do static routes."""
        no_snat_fw_rules = []
        gw_port = router.gw_port
        if gw_port and not router.enable_snat:
            subnet_cidrs = self._find_router_subnets_cidrs(
                context.elevated(), router['id'])
            if subnet_cidrs:
                no_snat_fw_rules.append({
                    'name': NO_SNAT_RULE_NAME,
                    'action': 'allow',
                    'enabled': True,
                    'source_vnic_groups': ["external"],
                    'destination_ip_address': subnet_cidrs})
        return no_snat_fw_rules
    def _get_allocation_pools_fw_rule(self, context, router):
        """Get the firewall rule for the default gateway address pool

        Return the firewall rule that should be added in order to allow
        not SNAT-ed traffic to external gateway with the same address
        scope as the interfaces
        """
        gw_port = router.gw_port
        if not gw_port or not router.enable_snat:
            return

        gw_address_scope = self._get_network_address_scope(
            context.elevated(), gw_port['network_id'])
        if gw_address_scope is None:
            return

        subnets = self._find_router_subnets(context.elevated(),
                                            router['id'])
        no_nat_cidrs = []
        for subnet in subnets:
            # if the subnets address scope is the same as the gateways:
            # we should add it to the rule
            subnet_address_scope = self._get_subnetpool_address_scope(
                context.elevated(), subnet['subnetpool_id'])
            if (gw_address_scope == subnet_address_scope):
                no_nat_cidrs.append(subnet['cidr'])

        if no_nat_cidrs:
            return {'name': ALLOCATION_POOL_RULE_NAME,
                    'action': 'allow',
                    'enabled': True,
                    'source_vnic_groups': ["external"],
                    'destination_ip_address': no_nat_cidrs}

    def _get_dnat_fw_rule(self, context, router):
        # Get FW rule to open dnat firewall flows
        _, dnat_rules = self._get_nat_rules(context, router)
        dnat_cidrs = [rule['dst'] for rule in dnat_rules]
        if dnat_cidrs:
            return {
                'name': DNAT_RULE_NAME,
                'action': 'allow',
                'enabled': True,
                'destination_ip_address': dnat_cidrs}

    def _get_subnet_fw_rules(self, context, router):
        # Get FW rule/s to open subnets firewall flows and static routes
        # relative flows
        fw_rules = []
        subnet_cidrs_per_ads = \
            self._find_router_subnets_cidrs_per_addr_scope(
                context.elevated(), router['id'])
        routes = self._get_extra_routes_by_router_id(context, router['id'])
        routes_dest = [route['destination'] for route in routes]
        for subnet_cidrs in subnet_cidrs_per_ads:
            # create a rule to allow east-west traffic between subnets on
            # this address scope
            # Also add the static routes to each address scope
            ips = subnet_cidrs + routes_dest
            fw_rules.append({
                'name': SUBNET_RULE_NAME,
                'action': 'allow',
                'enabled': True,
                'source_ip_address': ips,
                'destination_ip_address': ips})
        return fw_rules

    def _update_nat_rules(self, context, router, router_id=None):
        # Recompute and push SNAT/DNAT rules to the edge appliance
        snat, dnat = self._get_nat_rules(context, router)
        if not router_id:
            router_id = router['id']
        edge_utils.update_nat_rules(
            self.nsx_v, context, router_id, snat, dnat)

    def recalculate_snat_rules_for_router(self, context, router, subnets):
        """Recalculate router snat rules for specific subnets.

        Invoked when subnetpool address scope changes.
        """
        # Recalculate all nat rules for all subnets of the router
        router_db = self._get_router(context, router['id'])
        self._update_nat_rules(context, router_db)

    def recalculate_fw_rules_for_router(self, context, router, subnets):
        """Recalculate router fw rules for specific subnets.

        Invoked when subnetpool address scope changes.
        """
        # Recalculate all fw rules for all subnets of the router
        router_db = self._get_router(context, router['id'])
        self._update_subnets_and_dnat_firewall(context, router_db)

    def _check_intf_number_of_router(self, context, router_id):
        """Raise ServiceOverQuota when the edge interface limit is hit."""
        intf_ports = self._get_port_by_device_id(
            context, router_id, l3_db.DEVICE_OWNER_ROUTER_INTF)
        if len(intf_ports) >= (vcns_const.MAX_INTF_NUM):
            err_msg = _("Interfaces number on router: %(router_id)s "
                        "has reached the maximum %(number)d which NSXv can "
                        "support. Please use vdr if you want to add "
                        "unlimited interfaces") % {
                'router_id': router_id,
                'number': vcns_const.MAX_INTF_NUM}
            raise nsx_exc.ServiceOverQuota(overs="router-interface-add",
                                           err_msg=err_msg)

    def _update_router_admin_state(self, context, router_id,
                                   router_type, admin_state):
        # Collecting all router interfaces and updating the connection
        # status for each one to reflect the router admin-state-up status.
        intf_net_ids = (
            self._get_internal_network_ids_by_router(context, router_id))
        edge_id = self._get_edge_id_by_rtr_id(context, router_id)
        with locking.LockManager.get_lock(edge_id):
            for network_id in intf_net_ids:
                address_groups = (
                    self._get_address_groups(context, router_id,
                                             network_id))
                update_args = (self.nsx_v, context, router_id, network_id,
                               address_groups, admin_state)
                if router_type == 'distributed':
                    edge_utils.update_vdr_internal_interface(*update_args)
                else:
                    edge_utils.update_internal_interface(*update_args)

    def _get_interface_info(self, context, interface_info):
        """Resolve (network_id, subnet_id) from an interface request."""
        is_port, is_sub = self._validate_interface_info(interface_info)
        if is_port:
            port = self._check_router_port(context,
                                           interface_info['port_id'], '')
            subnet_id = port['fixed_ips'][0]['subnet_id']
            net_id = port['network_id']
        elif is_sub:
            subnet_id = interface_info['subnet_id']
            net_id = self.get_subnet(
                context, subnet_id)['network_id']
        return net_id, subnet_id

    def add_router_interface(self, context, router_id, interface_info):
        """Attach a subnet/port to a router via its router driver.

        Rejects external networks and, when SNAT is disabled, validates
        the address scope of the interface against the gateway network.
        Rolls back the interface on backend (VcnsApi) failure.
        """
        router = self.get_router(context, router_id)
        net_id, subnet_id = self._get_interface_info(context,
                                                     interface_info)
        network = self.get_network(context.elevated(), net_id)
        # Do not support external subnet/port as a router interface
        if network.get(extnet_apidef.EXTERNAL):
            msg = _("cannot add an external subnet/port as a router "
                    "interface")
            raise n_exc.InvalidInput(error_message=msg)

        snat_disabled = (
            router[l3_apidef.EXTERNAL_GW_INFO] and
            not router[l3_apidef.EXTERNAL_GW_INFO]['enable_snat'])
        if snat_disabled and subnet_id:
            gw_network_id = router[l3_apidef.EXTERNAL_GW_INFO]['network_id']
            self._validate_address_scope_for_router_interface(
                context.elevated(), router_id, gw_network_id, subnet_id)

        router_driver = self._find_router_driver(context, router_id)
        try:
            return router_driver.add_router_interface(
                context, router_id, interface_info)
        except vsh_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to add interface_info %(info)s on "
                          "router %(router_id)s",
                          {'info': str(interface_info),
                           'router_id': router_id})
                router_driver.remove_router_interface(
                    context, router_id, interface_info)

    def remove_router_interface(self, context, router_id, interface_info):
        # Delegate to the driver matching this router's type
        router_driver = self._find_router_driver(context, router_id)
        return router_driver.remove_router_interface(
            context, router_id, interface_info)

    def _get_floatingips_by_router(self, context, router_id):
        """Return the floating IP addresses associated via this router."""
        fip_qry = context.session.query(l3_db_models.FloatingIP)
        fip_db = fip_qry.filter_by(router_id=router_id).all()
        return [fip.floating_ip_address
                for fip in fip_db
                if fip.fixed_port_id]

    def _update_external_interface(self, context, router, router_id=None):
        """Refresh the edge's external vnic with gw + floating IPs."""
        ext_net_id = router.gw_port_id and router.gw_port.network_id
        addr, mask, nexthop = self._get_external_attachment_info(
            context, router)
        secondary = self._get_floatingips_by_router(context, router['id'])
        if not router_id:
            router_id = router['id']
        self.edge_manager.update_external_interface(
            self.nsx_v, context, router_id, ext_net_id, addr,
            mask, secondary)

    def _set_floatingip_status(self, context, floatingip_db, status=None):
        # Default: ACTIVE when routed, DOWN otherwise
        if not status:
            status = (constants.FLOATINGIP_STATUS_ACTIVE
                      if floatingip_db.get('router_id')
                      else constants.FLOATINGIP_STATUS_DOWN)
        if floatingip_db['status'] != status:
            floatingip_db['status'] = status
            self.update_floatingip_status(context, floatingip_db['id'],
                                          status)

    def _update_edge_router(self, context, router_id):
        # Refresh NAT/firewall/interface config on the router's edge
        router_driver = self._find_router_driver(context, router_id)
        router_driver._update_edge_router(context, router_id)

    def create_floatingip(self, context, floatingip):
        """Create a floating IP; rolls back on edge update failure."""
        fip_db = super(NsxVPluginV2, self).create_floatingip(
            context, floatingip)
        router_id = fip_db['router_id']
        if router_id:
            try:
                self._update_edge_router(context, router_id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception("Failed to update edge router")
                    super(NsxVPluginV2, self).delete_floatingip(
                        context, fip_db['id'])
        self._set_floatingip_status(context, fip_db)
        return fip_db

    def update_floatingip(self, context, id, floatingip):
        """Update a floating IP; restores old association on failure."""
        old_fip = self._get_floatingip(context, id)
        old_router_id = old_fip.router_id
        old_port_id = old_fip.fixed_port_id
        fip_db = super(NsxVPluginV2, self).update_floatingip(
            context, id, floatingip)
        router_id = fip_db.get('router_id')
        try:
            # Update old router's nat rules if old_router_id is not None.
            if old_router_id:
                self._update_edge_router(context, old_router_id)
            # Update current router's nat rules if router_id is not None.
            if router_id:
                self._update_edge_router(context, router_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception("Failed to update edge router")
                super(NsxVPluginV2, self).update_floatingip(
                    context, id, {'floatingip': {'port_id': old_port_id}})
        self._set_floatingip_status(context, fip_db)
        return fip_db

    def delete_floatingip(self, context, id):
        """Delete a floating IP and refresh its former router's edge."""
        fip_db = self._get_floatingip(context, id)
        router_id = None
        if fip_db.fixed_port_id:
            router_id = fip_db.router_id
        super(NsxVPluginV2, self).delete_floatingip(context, id)
        if router_id:
            self._update_edge_router(context, router_id)

    def disassociate_floatingips(self, context, port_id):
        """Disassociate a port's floating IPs, then refresh the edge."""
        router_id = None
        try:
            fip_qry = context.session.query(l3_db_models.FloatingIP)
            fip_db = fip_qry.filter_by(fixed_port_id=port_id)
            for fip in fip_db:
                if fip.router_id:
                    router_id = fip.router_id
                    break
        except sa_exc.NoResultFound:
            router_id = None
        super(NsxVPluginV2, self).disassociate_floatingips(context,
                                                           port_id)
        if router_id:
            self._update_edge_router(context, router_id)

    def _update_subnets_and_dnat_firewall(self, context, router_db,
                                          router_id=None):
        """Update the router edge firewall with all the relevant rules.

        router_db is the neutron router structure
        router_id is the id of the actual router that will be updated on
        the NSX (in case of distributed router it can be plr or tlr)
        """
        if not router_id:
            router_id = router_db['id']
        # Add fw rules if FWaaS is enabled
        # in case of a distributed-router:
        # router['id'] is the id of the neutron router (=tlr)
        # and router_id is the plr/tlr (the one that is being updated)
        fwaas_rules = None
        if (self.fwaas_callbacks.should_apply_firewall_to_router(
                context, router_db, router_id)):
            fwaas_rules = self.fwaas_callbacks.get_fwaas_rules_for_router(
                context, router_db['id'])

        self.update_router_firewall(context, router_id, router_db,
                                    fwaas_rules=fwaas_rules)

    def update_router_firewall(self, context, router_id, router_db,
                               fwaas_rules=None):
        """Recreate all rules in the router edge firewall

        router_db is the neutron router structure
        router_id is the id of the actual router that will be updated on
        the NSX (in case of distributed router it can be plr or tlr)
        if fwaas_rules is not none - this router is attached to a firewall
        """
        fw_rules = []
        router_with_firewall = True if fwaas_rules is not None else False
        edge_id = self._get_edge_id_by_rtr_id(context, router_id)
        # Add FW rule/s to open subnets firewall flows and static routes
        # relative flows
        subnet_rules = self._get_subnet_fw_rules(context, router_db)
        if subnet_rules:
            fw_rules.extend(subnet_rules)

        # If metadata service is enabled, block access to inter-edge network
        if self.metadata_proxy_handler:
            fw_rules += nsx_v_md_proxy.get_router_fw_rules()

        # Add FWaaS rules
        if router_with_firewall and fwaas_rules:
            fw_rules += fwaas_rules

        if not router_with_firewall:
            dnat_rule = self._get_dnat_fw_rule(context, router_db)
            if dnat_rule:
                fw_rules.append(dnat_rule)

        # Add rule for not NAT-ed allocation pools
        alloc_pool_rule = self._get_allocation_pools_fw_rule(
            context, router_db)
        if alloc_pool_rule:
            fw_rules.append(alloc_pool_rule)

        # Add no-snat rules
        nosnat_fw_rules = self._get_nosnat_subnets_fw_rules(
            context, router_db)
        fw_rules.extend(nosnat_fw_rules)

        vpn_plugin = directory.get_plugin(plugin_const.VPN)
        if vpn_plugin:
            vpn_driver = vpn_plugin.drivers[vpn_plugin.default_provider]
            vpn_rules = (
                vpn_driver._generate_ipsecvpn_firewall_rules(
                    self.plugin_type(), context, edge_id=edge_id))
            fw_rules.extend(vpn_rules)

        # Get the load balancer rules in case they are refreshed
        # (relevant only for older LB that are still on the router edge)
        lb_rules = nsxv_db.get_nsxv_lbaas_loadbalancer_binding_by_edge(
            context.session, edge_id)
        for rule in lb_rules:
            vsm_rule = self.nsx_v.vcns.get_firewall_rule(
                edge_id, rule['edge_fw_rule_id'])[1]
            lb_fw_rule = {
                'action': edge_firewall_driver.FWAAS_ALLOW,
                'enabled': vsm_rule['enabled'],
                'destination_ip_address':
                    vsm_rule['destination']['ipAddress'],
                'name': vsm_rule['name'],
                'ruleId': vsm_rule['ruleId']
            }
            fw_rules.append(lb_fw_rule)

        fw = {'firewall_rule_list': fw_rules}
        try:
            # If we have a firewall we shouldn't add the default
            # allow-external rule
            allow_external = False if router_with_firewall else True
            edge_utils.update_firewall(self.nsx_v, context, router_id, fw,
                                       allow_external=allow_external)
        except vsh_exc.ResourceNotFound:
            LOG.error("Failed to update firewall for router %s",
                      router_id)

    def _delete_nsx_security_group(self, nsx_sg_id, nsx_policy):
        """Helper method to delete nsx security group."""
        if nsx_sg_id is not None:
            if nsx_policy:
                # First remove this security group from the NSX policy,
                # Or else the delete will fail
                try:
                    with locking.LockManager.get_lock(
                        'neutron-security-policy-' + str(nsx_policy)):
                        self.nsx_sg_utils.del_nsx_security_group_from_policy(
                            nsx_policy, nsx_sg_id)
                except Exception as e:
                    LOG.warning("Failed to remove nsx security group "
                                "%(id)s from policy %(pol)s : %(e)s",
                                {'id': nsx_sg_id, 'pol': nsx_policy,
                                 'e': e})

            self.nsx_v.vcns.delete_security_group(nsx_sg_id)

    # Security group handling section #
    def _delete_section(self, section_uri):
        """Helper method to delete nsx rule section."""
        if section_uri is not None:
self.nsx_v.vcns.delete_section(section_uri) def _get_section_uri(self, session, security_group_id): mapping = nsxv_db.get_nsx_section(session, security_group_id) if mapping is not None: return mapping['ip_section_id'] def _create_fw_section_for_security_group(self, context, securitygroup, nsx_sg_id): logging = (cfg.CONF.nsxv.log_security_groups_allowed_traffic or securitygroup[sg_logging.LOGGING]) action = 'deny' if securitygroup[provider_sg.PROVIDER] else 'allow' section_name = self.nsx_sg_utils.get_nsx_section_name(securitygroup) nsx_rules = [] # Translate Neutron rules to NSXv fw rules and construct the fw section for rule in securitygroup['security_group_rules']: nsx_rule = self._create_nsx_rule( context, rule, nsx_sg_id, logged=logging, action=action) nsx_rules.append(nsx_rule) section = self.nsx_sg_utils.get_section_with_rules( section_name, nsx_rules) # Execute REST API for creating the section h, c = self.nsx_v.vcns.create_section( 'ip', self.nsx_sg_utils.to_xml_string(section), insert_top=securitygroup[provider_sg.PROVIDER], insert_before=self.default_section) rule_pairs = self.nsx_sg_utils.get_rule_id_pair_from_section(c) # Add database associations for fw section and rules nsxv_db.add_neutron_nsx_section_mapping( context.session, securitygroup['id'], h['location']) for pair in rule_pairs: # Save nsx rule id in the DB for future access nsxv_db.add_neutron_nsx_rule_mapping( context.session, pair['neutron_id'], pair['nsx_id']) def _create_nsx_security_group(self, context, securitygroup): nsx_sg_name = self.nsx_sg_utils.get_nsx_sg_name(securitygroup) # NSX security-group config sg_dict = {"securitygroup": {"name": nsx_sg_name, "description": securitygroup['description']}} # Create the nsx security group h, nsx_sg_id = self.nsx_v.vcns.create_security_group(sg_dict) # Save moref in the DB for future access nsx_db.add_neutron_nsx_security_group_mapping( context.session, securitygroup['id'], nsx_sg_id) return nsx_sg_id def 
_process_security_group_create_backend_resources(self, context, securitygroup): nsx_sg_id = self._create_nsx_security_group(context, securitygroup) policy = securitygroup.get(sg_policy.POLICY) if self._use_nsx_policies and policy: # When using policies - no rules should be created. # just add the security group to the policy on the backend. self._update_nsx_security_group_policies( policy, None, nsx_sg_id) else: try: self._create_fw_section_for_security_group( context, securitygroup, nsx_sg_id) except Exception: with excutils.save_and_reraise_exception(): self._delete_nsx_security_group(nsx_sg_id, policy) if not securitygroup[provider_sg.PROVIDER]: # Add Security Group to the Security Groups container in order to # apply the default block rule. # This is relevant for policies security groups too. # provider security-groups should not have a default blocking rule. self._add_member_to_security_group(self.sg_container_id, nsx_sg_id) def _validate_security_group(self, context, security_group, default_sg, id=None): if self._use_nsx_policies: new_policy = None sg_with_policy = False if not id: # called from create_security_group # must have a policy: if not security_group.get(sg_policy.POLICY): if default_sg: # For default sg the default policy will be used security_group[sg_policy.POLICY] = ( cfg.CONF.nsxv.default_policy_id) elif not cfg.CONF.nsxv.allow_tenant_rules_with_policy: if context.is_admin: msg = _('A security group must be assigned to a ' 'policy') else: msg = _('Creation of security group is not ' 'allowed') raise n_exc.InvalidInput(error_message=msg) new_policy = security_group.get(sg_policy.POLICY) sg_with_policy = True if new_policy else False else: # called from update_security_group. 
# Check if the existing security group has policy or not sg_with_policy = self._is_policy_security_group(context, id) if sg_policy.POLICY in security_group: new_policy = security_group[sg_policy.POLICY] if sg_with_policy and not new_policy: # cannot remove a policy from an existing sg msg = (_('Security group %s must be assigned to a ' 'policy') % id) raise n_exc.InvalidInput(error_message=msg) if not sg_with_policy and new_policy: # cannot add a policy to a non-policy security group msg = (_('Cannot add policy to an existing security ' 'group %s') % id) raise n_exc.InvalidInput(error_message=msg) # validate that the new policy exists (and not hidden) by using the # plugin getter that raises an exception if it fails. if new_policy: try: policy_obj = self.get_nsx_policy(context, new_policy) except n_exc.ObjectNotFound: msg = _('Policy %s was not found on the NSX') % new_policy raise n_exc.InvalidInput(error_message=msg) # Do not support logging with policy if sg_with_policy and security_group.get(sg_logging.LOGGING): msg = _('Cannot support logging when using NSX policies') raise n_exc.InvalidInput(error_message=msg) # Use the NSX policy description as the description of this # security group if the description was not set by the user # and the security group is new or policy was updated # if the nsx policy has not description - use its name if new_policy and not security_group.get('description'): security_group['description'] = ( policy_obj.get('description') or policy_obj.get('name'))[:db_const.DESCRIPTION_FIELD_SIZE] else: # must not have a policy: if security_group.get(sg_policy.POLICY): msg = _('The security group cannot be assigned to a policy') raise n_exc.InvalidInput(error_message=msg) def create_security_group(self, context, security_group, default_sg=False): """Create a security group.""" sg_data = security_group['security_group'] sg_id = sg_data["id"] = str(uuid.uuid4()) self._validate_security_group(context, sg_data, default_sg) with 
db_api.context_manager.writer.using(context): is_provider = True if sg_data.get(provider_sg.PROVIDER) else False is_policy = True if sg_data.get(sg_policy.POLICY) else False if is_provider or is_policy: new_sg = self.create_security_group_without_rules( context, security_group, default_sg, is_provider) else: new_sg = super(NsxVPluginV2, self).create_security_group( context, security_group, default_sg) self._process_security_group_properties_create( context, new_sg, sg_data, default_sg) try: self._process_security_group_create_backend_resources( context, new_sg) except Exception: # Couldn't create backend resources, rolling back neutron db # changes. with excutils.save_and_reraise_exception(): # Delete security-group and its associations from database, # Only admin can delete the default security-group if default_sg: context = context.elevated() super(NsxVPluginV2, self).delete_security_group(context, sg_id) LOG.exception('Failed to create security group') return new_sg def _update_security_group_with_policy(self, updated_group, sg_data, nsx_sg_id): """Handle security group update when using NSX policies Remove the security group from the old policies, and apply on the new policies """ # Verify that the policy was not removed from the security group if (sg_policy.POLICY in updated_group and not updated_group[sg_policy.POLICY]): msg = _('It is not allowed to remove the policy from security ' 'group %s') % nsx_sg_id raise n_exc.InvalidInput(error_message=msg) if (updated_group.get(sg_policy.POLICY) and updated_group[sg_policy.POLICY] != sg_data[sg_policy.POLICY]): new_policy = updated_group[sg_policy.POLICY] old_policy = sg_data[sg_policy.POLICY] self._update_nsx_security_group_policies( new_policy, old_policy, nsx_sg_id) def _update_nsx_security_group_policies(self, new_policy, old_policy, nsx_sg_id): # update the NSX security group to use this policy if old_policy: with locking.LockManager.get_lock( 'neutron-security-policy-' + str(old_policy)): 
self.nsx_sg_utils.del_nsx_security_group_from_policy( old_policy, nsx_sg_id) with locking.LockManager.get_lock( 'neutron-security-policy-' + str(new_policy)): self.nsx_sg_utils.add_nsx_security_group_to_policy( new_policy, nsx_sg_id) def update_security_group(self, context, id, security_group): s = security_group['security_group'] self._validate_security_group(context, s, False, id=id) nsx_sg_id = nsx_db.get_nsx_security_group_id(context.session, id, moref=True) section_uri = self._get_section_uri(context.session, id) section_needs_update = False sg_data = super(NsxVPluginV2, self).update_security_group( context, id, security_group) # Reflect security-group name or description changes in the backend, if set(['name', 'description']) & set(s.keys()): nsx_sg_name = self.nsx_sg_utils.get_nsx_sg_name(sg_data) section_name = self.nsx_sg_utils.get_nsx_section_name(sg_data) self.nsx_v.vcns.update_security_group( nsx_sg_id, nsx_sg_name, sg_data['description']) # security groups with NSX policy - update the backend policy attached # to the security group if (self._use_nsx_policies and self._is_policy_security_group(context, id)): if sg_policy.POLICY in sg_data: self._update_security_group_with_policy(s, sg_data, nsx_sg_id) # The rest of the update are not relevant to policies security # groups as there is no matching section self._process_security_group_properties_update( context, sg_data, s) return sg_data with locking.LockManager.get_lock('rule-update-%s' % id): # Get the backend section matching this security group h, c = self.nsx_v.vcns.get_section(section_uri) section = self.nsx_sg_utils.parse_section(c) # dfw section name needs to be updated if the sg name was modified if 'name' in s.keys(): section.attrib['name'] = section_name section_needs_update = True # Update the dfw section if security-group logging option has # changed. 
log_all_rules = cfg.CONF.nsxv.log_security_groups_allowed_traffic self._process_security_group_properties_update(context, sg_data, s) if not log_all_rules and context.is_admin: section_needs_update |= ( self.nsx_sg_utils.set_rules_logged_option( section, sg_data[sg_logging.LOGGING])) if section_needs_update: # update the section with all the modifications self.nsx_v.vcns.update_section( section_uri, self.nsx_sg_utils.to_xml_string(section), h) return sg_data def delete_security_group(self, context, id, delete_base=True): """Delete a security group.""" self._prevent_non_admin_delete_provider_sg(context, id) self._prevent_non_admin_delete_policy_sg(context, id) policy = self._get_security_group_policy(context, id) try: # Find nsx rule sections section_uri = self._get_section_uri(context.session, id) # Find nsx security group nsx_sg_id = nsx_db.get_nsx_security_group_id(context.session, id, moref=True) if delete_base: # Delete neutron security group super(NsxVPluginV2, self).delete_security_group(context, id) # Delete nsx rule sections self._delete_section(section_uri) # Delete nsx security group self._delete_nsx_security_group(nsx_sg_id, policy) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Failed to delete security group") def _create_nsx_rule(self, context, rule, nsx_sg_id=None, logged=False, action='allow'): src = None dest = None port = None protocol = None icmptype = None icmpcode = None flags = {} if nsx_sg_id is None: # Find nsx security group for neutron security group nsx_sg_id = nsx_db.get_nsx_security_group_id( context.session, rule['security_group_id'], moref=True) # Find the remote nsx security group id, which might be the current # one. In case of the default security-group, the associated # nsx-security-group wasn't written to the database yet. 
if rule['remote_group_id'] == rule['security_group_id']: remote_nsx_sg_id = nsx_sg_id else: remote_nsx_sg_id = nsx_db.get_nsx_security_group_id( context.session, rule['remote_group_id'], moref=True) # Get source and destination containers from rule if rule['direction'] == 'ingress': if rule.get(secgroup_rule_local_ip_prefix.LOCAL_IP_PREFIX): dest = self.nsx_sg_utils.get_remote_container( None, rule[secgroup_rule_local_ip_prefix.LOCAL_IP_PREFIX]) src = self.nsx_sg_utils.get_remote_container( remote_nsx_sg_id, rule['remote_ip_prefix']) dest = dest or self.nsx_sg_utils.get_container(nsx_sg_id) flags['direction'] = 'in' else: dest = self.nsx_sg_utils.get_remote_container( remote_nsx_sg_id, rule['remote_ip_prefix']) src = self.nsx_sg_utils.get_container(nsx_sg_id) flags['direction'] = 'out' protocol = rule.get('protocol') if rule['port_range_min'] is not None: if protocol == '1' or protocol == 'icmp': icmptype = str(rule['port_range_min']) if rule['port_range_max'] is not None: icmpcode = str(rule['port_range_max']) else: port = str(rule['port_range_min']) if rule['port_range_max'] != rule['port_range_min']: port = port + '-' + str(rule['port_range_max']) # Get the neutron rule id to use as name in nsxv rule name = rule.get('id') services = [(protocol, port, icmptype, icmpcode)] if protocol else [] flags['ethertype'] = rule.get('ethertype') # Add rule in nsx rule section nsx_rule = self.nsx_sg_utils.get_rule_config( applied_to_ids=[nsx_sg_id], name=name, source=src, destination=dest, services=services, flags=flags, action=action, logged=logged, tag='Project_%s' % rule['tenant_id']) return nsx_rule def create_security_group_rule(self, context, security_group_rule, create_base=True): """Create a single security group rule.""" bulk_rule = {'security_group_rules': [security_group_rule]} return self.create_security_group_rule_bulk( context, bulk_rule, create_base=create_base)[0] def create_security_group_rule_bulk(self, context, security_group_rules, create_base=True): 
"""Create security group rules. :param security_group_rules: list of rules to create """ sg_rules = security_group_rules['security_group_rules'] sg_id = sg_rules[0]['security_group_rule']['security_group_id'] self._prevent_non_admin_delete_provider_sg(context, sg_id) ruleids = set() nsx_rules = [] self._validate_security_group_rules(context, security_group_rules) if self._is_policy_security_group(context, sg_id): # If policies are/were enabled - creating rules is forbidden msg = (_('Cannot create rules for security group %s with' ' a policy') % sg_id) raise n_exc.InvalidInput(error_message=msg) with locking.LockManager.get_lock('rule-update-%s' % sg_id): # Querying DB for associated dfw section id section_uri = self._get_section_uri(context.session, sg_id) logging = self._is_security_group_logged(context, sg_id) provider = self._is_provider_security_group(context, sg_id) log_all_rules = cfg.CONF.nsxv.log_security_groups_allowed_traffic # Translating Neutron rules to Nsx DFW rules for r in sg_rules: rule = r['security_group_rule'] if not self._check_local_ip_prefix(context, rule): rule[secgroup_rule_local_ip_prefix.LOCAL_IP_PREFIX] = None rule['id'] = rule.get('id') or uuidutils.generate_uuid() ruleids.add(rule['id']) nsx_rules.append( self._create_nsx_rule(context, rule, logged=log_all_rules or logging, action='deny' if provider else 'allow') ) _h, _c = self.nsx_v.vcns.get_section(section_uri) section = self.nsx_sg_utils.parse_section(_c) self.nsx_sg_utils.extend_section_with_rules(section, nsx_rules) h, c = self.nsx_v.vcns.update_section( section_uri, self.nsx_sg_utils.to_xml_string(section), _h) rule_pairs = self.nsx_sg_utils.get_rule_id_pair_from_section(c) try: # Save new rules in Database, including mappings between Nsx rules # and Neutron security-groups rules with db_api.context_manager.writer.using(context): if create_base: new_rule_list = super( NsxVPluginV2, self).create_security_group_rule_bulk_native( context, security_group_rules) for i, r in 
enumerate(sg_rules): self._process_security_group_rule_properties( context, new_rule_list[i], r['security_group_rule']) else: new_rule_list = sg_rules for pair in rule_pairs: neutron_rule_id = pair['neutron_id'] nsx_rule_id = pair['nsx_id'] if neutron_rule_id in ruleids: nsxv_db.add_neutron_nsx_rule_mapping( context.session, neutron_rule_id, nsx_rule_id) except Exception: with excutils.save_and_reraise_exception(): for nsx_rule_id in [p['nsx_id'] for p in rule_pairs if p['neutron_id'] in ruleids]: with locking.LockManager.get_lock('rule-update-%s' % sg_id): self.nsx_v.vcns.remove_rule_from_section( section_uri, nsx_rule_id) LOG.exception("Failed to create security group rule") return new_rule_list def delete_security_group_rule(self, context, id, delete_base=True): """Delete a security group rule.""" rule_db = self._get_security_group_rule(context, id) security_group_id = rule_db['security_group_id'] self._prevent_non_admin_delete_provider_sg(context, security_group_id) # Get the nsx rule from neutron DB and delete it nsx_rule_id = nsxv_db.get_nsx_rule_id(context.session, id) section_uri = self._get_section_uri( context.session, security_group_id) try: if nsx_rule_id and section_uri: with locking.LockManager.get_lock('rule-update-%s' % security_group_id): self.nsx_v.vcns.remove_rule_from_section( section_uri, nsx_rule_id) except vsh_exc.ResourceNotFound: LOG.debug("Security group rule %(id)s deleted, backend " "nsx-rule %(nsx_rule_id)s doesn't exist.", {'id': id, 'nsx_rule_id': nsx_rule_id}) if delete_base: securitygroup.SecurityGroupRule.delete_objects(context, id=id) def _remove_vnic_from_spoofguard_policy(self, session, net_id, vnic_id): policy_id = nsxv_db.get_spoofguard_policy_id(session, net_id) self.nsx_v.vcns.inactivate_vnic_assigned_addresses(policy_id, vnic_id) def _update_vnic_assigned_addresses(self, session, port, vnic_id): sg_policy_id = nsxv_db.get_spoofguard_policy_id( session, port['network_id']) if not sg_policy_id: LOG.warning("Spoofguard not 
defined for network %s", port['network_id']) return mac_addr = port['mac_address'] approved_addrs = [addr['ip_address'] for addr in port['fixed_ips']] # add in the address pair approved_addrs.extend( addr['ip_address'] for addr in port[addr_apidef.ADDRESS_PAIRS]) # add the IPv6 link-local address if there is an IPv6 address if any([netaddr.valid_ipv6(address) for address in approved_addrs]): lla = str(netutils.get_ipv6_addr_by_EUI64( constants.IPv6_LLA_PREFIX, mac_addr)) approved_addrs.append(lla) try: self.nsx_v.vcns.approve_assigned_addresses( sg_policy_id, vnic_id, mac_addr, approved_addrs) self.nsx_v.vcns.publish_assigned_addresses(sg_policy_id, vnic_id) except vsh_exc.AlreadyExists: # Entry already configured on the NSX pass def _is_compute_port(self, port): try: if (port['device_id'] and uuidutils.is_uuid_like(port['device_id']) and port['device_owner'].startswith('compute:')): return True except (KeyError, AttributeError): pass return False def _is_valid_ip(self, ip_addr): return netaddr.valid_ipv4(ip_addr) or netaddr.valid_ipv6(ip_addr) def _ensure_lock_operations(self): try: self.nsx_v.vcns.edges_lock_operation() except Exception: LOG.info("Unable to set manager lock operation") def _aggregate_publishing(self): try: self.nsx_v.vcns.configure_aggregate_publishing() except Exception: LOG.info("Unable to configure aggregate publishing") def _configure_reservations(self): ver = self.nsx_v.vcns.get_version() if version.LooseVersion(ver) < version.LooseVersion('6.2.3'): LOG.debug("Skipping reservation configuration. 
" "Not supported by version - %s.", ver) return try: self.nsx_v.vcns.configure_reservations() except Exception: LOG.info("Unable to configure edge reservations") def _validate_config(self): existing_dvs = self.nsx_v.vcns.get_dvs_list() if (cfg.CONF.nsxv.dvs_id and not self.nsx_v.vcns.validate_dvs(cfg.CONF.nsxv.dvs_id, dvs_list=existing_dvs)): raise nsx_exc.NsxResourceNotFound( res_name='dvs_id', res_id=cfg.CONF.nsxv.dvs_id) for dvs_id in self._availability_zones_data.get_additional_dvs_ids(): if not self.nsx_v.vcns.validate_dvs(dvs_id, dvs_list=existing_dvs): raise nsx_exc.NsxAZResourceNotFound( res_name='dvs_id', res_id=dvs_id) # validate network-vlan dvs ID's for dvs_id in self._network_vlans: if not self.nsx_v.vcns.validate_dvs(dvs_id, dvs_list=existing_dvs): raise nsx_exc.NsxResourceNotFound(res_name='dvs_id', res_id=dvs_id) # Validate the global & per-AZ validate_datacenter_moid if not self.nsx_v.vcns.validate_datacenter_moid( cfg.CONF.nsxv.datacenter_moid, during_init=True): raise nsx_exc.NsxResourceNotFound( res_name='datacenter_moid', res_id=cfg.CONF.nsxv.datacenter_moid) for dc in self._availability_zones_data.get_additional_datacenter(): if not self.nsx_v.vcns.validate_datacenter_moid( dc, during_init=True): raise nsx_exc.NsxAZResourceNotFound( res_name='datacenter_moid', res_id=dc) # Validate the global & per-AZ external_network if not self.nsx_v.vcns.validate_network( cfg.CONF.nsxv.external_network, during_init=True): raise nsx_exc.NsxResourceNotFound( res_name='external_network', res_id=cfg.CONF.nsxv.external_network) for ext_net in self._availability_zones_data.get_additional_ext_net(): if not self.nsx_v.vcns.validate_network( ext_net, during_init=True): raise nsx_exc.NsxAZResourceNotFound( res_name='external_network', res_id=ext_net) # Validate the global & per-AZ vdn_scope_id if not self.nsx_v.vcns.validate_vdn_scope(cfg.CONF.nsxv.vdn_scope_id): raise nsx_exc.NsxResourceNotFound( res_name='vdn_scope_id', res_id=cfg.CONF.nsxv.vdn_scope_id) for vdns 
in self._availability_zones_data.get_additional_vdn_scope(): if not self.nsx_v.vcns.validate_vdn_scope(vdns): raise nsx_exc.NsxAZResourceNotFound( res_name='vdn_scope_id', res_id=vdns) # Validate the global & per-AZ mgt_net_moid if (cfg.CONF.nsxv.mgt_net_moid and not self.nsx_v.vcns.validate_network( cfg.CONF.nsxv.mgt_net_moid)): raise nsx_exc.NsxResourceNotFound( res_name='mgt_net_moid', res_id=cfg.CONF.nsxv.mgt_net_moid) for mgmt_net in self._availability_zones_data.get_additional_mgt_net(): if not self.nsx_v.vcns.validate_network(mgmt_net): raise nsx_exc.NsxAZResourceNotFound( res_name='mgt_net_moid', res_id=mgmt_net) ver = self.nsx_v.vcns.get_version() if version.LooseVersion(ver) < version.LooseVersion('6.2.0'): LOG.warning("Skipping validations. Not supported by version.") return # Validate the host_groups for each AZ if cfg.CONF.nsxv.use_dvs_features: azs = self.get_azs_list() for az in azs: if az.edge_host_groups and az.edge_ha: if len(az.edge_host_groups) < 2: error = _("edge_host_groups must have at least 2 " "names") raise nsx_exc.NsxPluginException(err_msg=error) if (not az.ha_placement_random and len(az.edge_host_groups) > 2): LOG.warning("Availability zone %(az)s has %(count)s " "hostgroups. 
only the first 2 will be " "used until ha_placement_random is " "enabled", {'az': az.name, 'count': len(az.edge_host_groups)}) self._vcm.validate_host_groups(az.resource_pool, az.edge_host_groups) # Validations below only supported by 6.2.0 and above inventory = [(cfg.CONF.nsxv.resource_pool_id, 'resource_pool_id'), (cfg.CONF.nsxv.datastore_id, 'datastore_id'), (cfg.CONF.nsxv.ha_datastore_id, 'ha_datastore_id'), ] # Treat the cluster list for cluster in cfg.CONF.nsxv.cluster_moid: inventory.append((cluster, 'cluster_moid')) # Add the availability zones resources az_resources = self._availability_zones_data.get_inventory() for res in az_resources: inventory.append((res, 'availability_zone ' + res)) if cfg.CONF.nsxv.use_nsx_policies: # if use_nsx_policies=True, the default policy must be defined if not cfg.CONF.nsxv.default_policy_id: error = _("default_policy_id must be defined") raise nsx_exc.NsxPluginException(err_msg=error) inventory.append((cfg.CONF.nsxv.default_policy_id, 'default_policy_id')) for moref, field in inventory: if moref and not self.nsx_v.vcns.validate_inventory(moref): error = _("Configured %s not found") % field raise nsx_exc.NsxPluginException(err_msg=error) if cfg.CONF.nsxv.vdr_transit_network: edge_utils.validate_vdr_transit_network() def _nsx_policy_is_hidden(self, policy): for attrib in policy.get('extendedAttributes', []): if (attrib['name'].lower() == 'ishidden' and attrib['value'].lower() == 'true'): return True return False def _nsx_policy_to_dict(self, policy): return {'id': policy['objectId'], 'name': policy.get('name'), 'description': policy.get('description')} def get_nsx_policy(self, context, id, fields=None): try: policy = self.nsx_v.vcns.get_security_policy(id, return_xml=False) except vsh_exc.ResourceNotFound: # no such policy on backend raise n_exc.ObjectNotFound(id=id) if self._nsx_policy_is_hidden(policy): # This is an hidden policy raise n_exc.ObjectNotFound(id=id) return self._nsx_policy_to_dict(policy) def 
get_nsx_policies(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): policies = self.nsx_v.vcns.get_security_policies() results = [] for policy in policies.get('policies', []): if not self._nsx_policy_is_hidden(policy): results.append(self._nsx_policy_to_dict(policy)) return results def get_housekeeper(self, context, name, fields=None): return self.housekeeper.get(name) def get_housekeepers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): return self.housekeeper.list() def update_housekeeper(self, context, name, housekeeper): self.housekeeper.run(context, name) return self.housekeeper.get(name) def _get_appservice_id(self, name): return self.nsx_v.vcns.get_application_id(name) vmware-nsx-12.0.1/vmware_nsx/check_nsx_config.py0000666000175100017510000001436613244523345022004 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from __future__ import print_function import sys from oslo_config import cfg from neutron.common import config from vmware_nsx._i18n import _ from vmware_nsx.common import config as nsx_config # noqa from vmware_nsx.common import nsx_utils from vmware_nsx.nsxlib import mh as nsxlib config.setup_logging() def help(name): print("Usage: %s path/to/neutron/plugin/ini/config/file" % name) sys.exit(1) def get_nsx_controllers(cluster): return cluster.nsx_controllers def config_helper(config_entity, cluster): try: return nsxlib.do_request('GET', "/ws.v1/%s?fields=uuid" % config_entity, cluster=cluster).get('results', []) except Exception as e: msg = (_("Error '%(err)s' when connecting to controller(s): %(ctl)s.") % {'err': str(e), 'ctl': ', '.join(get_nsx_controllers(cluster))}) raise Exception(msg) def get_control_cluster_nodes(cluster): return config_helper("control-cluster/node", cluster) def get_gateway_services(cluster): ret_gw_services = {"L2GatewayServiceConfig": [], "L3GatewayServiceConfig": []} gw_services = config_helper("gateway-service", cluster) for gw_service in gw_services: ret_gw_services[gw_service['type']].append(gw_service['uuid']) return ret_gw_services def get_transport_zones(cluster): transport_zones = config_helper("transport-zone", cluster) return [transport_zone['uuid'] for transport_zone in transport_zones] def get_transport_nodes(cluster): transport_nodes = config_helper("transport-node", cluster) return [transport_node['uuid'] for transport_node in transport_nodes] def is_transport_node_connected(cluster, node_uuid): try: return nsxlib.do_request('GET', "/ws.v1/transport-node/%s/status" % node_uuid, cluster=cluster)['connection']['connected'] except Exception as e: msg = (_("Error '%(err)s' when connecting to controller(s): %(ctl)s.") % {'err': str(e), 'ctl': ', '.join(get_nsx_controllers(cluster))}) raise Exception(msg) def main(): if len(sys.argv) != 2: help(sys.argv[0]) args = ['--config-file'] args.append(sys.argv[1]) config.init(args) 
print("----------------------- Database Options -----------------------") print("\tconnection: %s" % cfg.CONF.database.connection) print("\tretry_interval: %d" % cfg.CONF.database.retry_interval) print("\tmax_retries: %d" % cfg.CONF.database.max_retries) print("----------------------- NSX Options -----------------------") print("\tNSX Generation Timeout %d" % cfg.CONF.NSX.nsx_gen_timeout) print("\tNumber of concurrent connections to each controller %d" % cfg.CONF.NSX.concurrent_connections) print("\tmax_lp_per_bridged_ls: %s" % cfg.CONF.NSX.max_lp_per_bridged_ls) print("\tmax_lp_per_overlay_ls: %s" % cfg.CONF.NSX.max_lp_per_overlay_ls) print("----------------------- Cluster Options -----------------------") print("\tretries: %s" % cfg.CONF.retries) print("\tredirects: %s" % cfg.CONF.redirects) print("\thttp_timeout: %s" % cfg.CONF.http_timeout) cluster = nsx_utils.create_nsx_cluster( cfg.CONF, cfg.CONF.NSX.concurrent_connections, cfg.CONF.NSX.nsx_gen_timeout) nsx_controllers = get_nsx_controllers(cluster) num_controllers = len(nsx_controllers) print("Number of controllers found: %s" % num_controllers) if num_controllers == 0: print("You must specify at least one controller!") sys.exit(1) get_control_cluster_nodes(cluster) for controller in nsx_controllers: print("\tController endpoint: %s" % controller) gateway_services = get_gateway_services(cluster) default_gateways = { "L2GatewayServiceConfig": cfg.CONF.default_l2_gw_service_uuid, "L3GatewayServiceConfig": cfg.CONF.default_l3_gw_service_uuid} errors = 0 for svc_type in default_gateways.keys(): for uuid in gateway_services[svc_type]: print("\t\tGateway(%s) uuid: %s" % (svc_type, uuid)) if (default_gateways[svc_type] and default_gateways[svc_type] not in gateway_services[svc_type]): print("\t\t\tError: specified default %s gateway (%s) is " "missing from NSX Gateway Services!" 
% ( svc_type, default_gateways[svc_type])) errors += 1 transport_zones = get_transport_zones(cluster) print("\tTransport zones: %s" % transport_zones) if cfg.CONF.default_tz_uuid not in transport_zones: print("\t\tError: specified default transport zone " "(%s) is missing from NSX transport zones!" % cfg.CONF.default_tz_uuid) errors += 1 transport_nodes = get_transport_nodes(cluster) print("\tTransport nodes: %s" % transport_nodes) node_errors = [] for node in transport_nodes: if not is_transport_node_connected(cluster, node): node_errors.append(node) # Use different exit codes, so that we can distinguish # between config and runtime errors if len(node_errors): print("\nThere are one or more transport nodes that are " "not connected: %s. Please, revise!" % node_errors) sys.exit(10) elif errors: print("\nThere are %d errors with your configuration. " "Please, revise!" % errors) sys.exit(12) else: print("Done.") vmware-nsx-12.0.1/vmware_nsx/shell/0000775000175100017510000000000013244524600017226 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/commands.py0000666000175100017510000000506013244523345021411 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutronclient.neutron import v2_0 as client from vmware_nsx._i18n import _ LSN_PATH = '/lsns' def print_report(write_func, report): write_func(_("\nService type = %s\n") % report['report']['type']) services = ','.join(report['report']['services']) ports = ','.join(report['report']['ports']) write_func(_("Service uuids = %s\n") % services) write_func(_("Port uuids = %s\n\n") % ports) class NetworkReport(client.NeutronCommand): """Retrieve network migration report.""" def get_parser(self, prog_name): parser = super(NetworkReport, self).get_parser(prog_name) parser.add_argument('network', metavar='network', help=_('ID or name of network to run report on')) return parser def run(self, parsed_args): net = parsed_args.network net_id = client.find_resourceid_by_name_or_id(self.app.client, 'network', net) res = self.app.client.get("%s/%s" % (LSN_PATH, net_id)) if res: self.app.stdout.write(_('Migration report is:\n')) print_report(self.app.stdout.write, res['lsn']) class NetworkMigrate(client.NeutronCommand): """Perform network migration.""" def get_parser(self, prog_name): parser = super(NetworkMigrate, self).get_parser(prog_name) parser.add_argument('network', metavar='network', help=_('ID or name of network to migrate')) return parser def run(self, parsed_args): net = parsed_args.network net_id = client.find_resourceid_by_name_or_id(self.app.client, 'network', net) body = {'network': net_id} res = self.app.client.post(LSN_PATH, body={'lsn': body}) if res: self.app.stdout.write(_('Migration has been successful:\n')) print_report(self.app.stdout.write, res['lsn']) vmware-nsx-12.0.1/vmware_nsx/shell/nsx_instance_if_migrate.py0000666000175100017510000002057013244523345024475 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import getopt import logging import re import sys import xml.etree.ElementTree as et from keystoneauth1 import identity from keystoneauth1 import session import libvirt from neutronclient.v2_0 import client import nova.conf CONF = nova.conf.CONF logging.basicConfig(level=logging.INFO) LOG = logging.getLogger(__name__) def usage(): print ("python nsx_instance_if_migrate.py --username= " "--password= --project= " "--auth-url= " "[--project-domain-id=] " "[--user-domain-id=] " "[--machine-type=] " "[--nsx-bridge=]\n\n" "Convert libvirt interface definitions on a KVM host, to NSX " "managed vSwitch definitions\n\n" " username: Admin user's username\n" " password: Admin user's password\n" " keystone auth URL: URL to keystone's authentication service\n" " project domain: Keystone project domain\n" " user domain: Keystone user domain\n" " migrated machine type: Overwrites libvirt's machine type\n" " log file: Output log of the command execution\n" " NSX managed vSwitch: vSwitch on host, managed by NSX\n\n") sys.exit() def get_opts(): opts = {} o = [] p = re.compile('^-+') try: o, a = getopt.getopt(sys.argv[1:], 'h', ['help', 'username=', 'password=', 'project=', 'project-domain-id=', 'user-domain-id=', 'auth-url=', 'machine-type=', 'logfile=', 'nsx-bridge=']) except getopt.GetoptError as err: LOG.error(err) usage() for opt, val in o: if opt in ('h', 'help'): usage() else: opts[p.sub('', opt)] = val for mandatory_key in ['username', 'password', 'project', 'auth-url']: if opts.get(mandatory_key) is None: LOG.error("%s must be specified!", mandatory_key) usage() return opts 
def xmltag_text_get(obj, tag_name): tag_obj = obj.find(tag_name) if tag_obj is not None: return tag_obj.text def xmltag_attr_get(obj, tag, attr): tag_obj = obj.find(tag) if tag_obj is not None: return tag_obj.get(attr) def xmltag_set(elem, tag, **kwargs): sub_elem = elem.find(tag) if sub_elem is None: sub_elem = et.SubElement(elem, tag) for attr in kwargs.keys(): sub_elem.set(attr, kwargs.get(attr)) return sub_elem def iface_migrate(neutron, instance_name, iface, nsx_switch): iface.set('type', 'bridge') xmltag_set(iface, 'source', bridge=nsx_switch) virt_port = xmltag_set(iface, 'virtualport', type='openvswitch') instance_mac = xmltag_attr_get(iface, 'mac', 'address') if instance_mac is None: LOG.error("Couldn't find MAC address for instance %s", instance_name) return ports = neutron.list_ports(fields=['id'], mac_address=instance_mac) if len(ports['ports']) != 1: LOG.error('For instance %(vm)s, invalid ports received from neutron: ' '%(ports)s', {'vm': instance_name, 'ports': ports}) return neutron_port_id = ports['ports'][0]['id'] xmltag_set(virt_port, 'parameters', interfaceid=neutron_port_id) xmltag_set(iface, 'driver', name='qemu') tap_dev = xmltag_attr_get(iface, 'target', 'dev') if tap_dev is None: LOG.error("For instance %(vm)s, couldn't find tap device for " "interface", instance_name) # remove script tag if found script_tag = iface.find('script') if script_tag is not None: iface.remove(script_tag) def is_valid_os_data(libvirt_conn, os_type, os_arch, os_machine): caps_xml = libvirt_conn.getCapabilities() caps_root = et.fromstring(caps_xml) for guest_tag in caps_root.findall('guest'): if (xmltag_text_get(guest_tag, 'os_type') == os_type and xmltag_attr_get(guest_tag, 'arch', 'name') == os_arch): for machine_tag in guest_tag.find('arch').findall('machine'): if machine_tag.text == os_machine: return True return False def instance_migrate(libvirt_conn, neutron, instance, machine_type, nsx_switch): xml = instance.XMLDesc() root = et.fromstring(xml) instance_name 
= xmltag_text_get(root, 'name') if instance_name is None: LOG.error("Couldn't find instance name in XML") return instance_uuid = xmltag_text_get(root, 'uuid') if instance_uuid is None: LOG.error("Couldn't find UUID for instance %s", instance_name) return # Validate that os is supported by hypervisor os_tag = root.find('os') if os_tag is None: LOG.error("Couldn't find OS tag for instance %s", instance_name) return type_tag = os_tag.find('type') if not is_valid_os_data(libvirt_conn, type_tag.text, type_tag.get('arch'), type_tag.get('machine')): LOG.error("Instance %s OS data is invalid or not supported by " "hypervisor", instance_name) return if machine_type is not None: type_tag.set('machine', machine_type) devs = root.find('devices') ifaces = devs.findall('interface') if not ifaces: LOG.error('No interfaces to migrate for instance %s', instance_name) for iface in ifaces: iface_migrate(neutron, instance_name, iface, nsx_switch) instance.undefine() libvirt_conn.defineXML(et.tostring(root)) LOG.info('Migrated instance %(vm)s (%(uuid)s) successfully!', {'vm': instance_name, 'uuid': instance_uuid}) def main(): opts = get_opts() if opts.get('logfile'): f_handler = logging.FileHandler(opts.get('logfile')) f_formatter = logging.Formatter( '%(asctime)s %(levelname)s %(message)s') f_handler.setFormatter(f_formatter) LOG.addHandler(f_handler) conn = libvirt.open('qemu:///system') if conn is None: LOG.error('Failed to connect to libvirt') exit(1) auth = identity.Password(username=opts['username'], password=opts['password'], project_name=opts['project'], project_domain_id=opts.get('project-domain-id', 'default'), user_domain_id=opts.get('user-domain-id', 'default'), auth_url=opts['auth-url']) if auth is None: LOG.error('Failed to authenticate with keystone') exit(1) sess = session.Session(auth=auth) if sess is None: LOG.error('Failed to create keystone session') exit(1) neutron = client.Client(session=sess) if neutron is None: LOG.error('Failed to create neutron session') 
exit(1) instances = conn.listAllDomains() if not instances: LOG.error('No instances to migrate') for instance in instances: try: instance_migrate(conn, neutron, instance, opts.get('machine-type'), opts.get('nsx-bridge', CONF.neutron.ovs_bridge)) except Exception as e: LOG.error('Failed to migrate instance with exception %s', e) if __name__ == "__main__": main() vmware-nsx-12.0.1/vmware_nsx/shell/__init__.py0000666000175100017510000000357513244523345021360 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys from neutronclient import shell from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.shell import commands as cmd # Oslo Logging uses INFO as default # Use a simple format for the output logging_format_string = '%(message)s' logging.register_options(cfg.CONF) logging.setup(cfg.CONF, "vmware-nsx") cfg.CONF.set_override('logging_context_format_string', logging_format_string) cfg.CONF.set_override('logging_default_format_string', logging_format_string) cfg.CONF.set_override('logging_exception_prefix', '') class NsxManage(shell.NeutronShell): def __init__(self, api_version): super(NsxManage, self).__init__(api_version) self.command_manager.add_command('net-migrate', cmd.NetworkMigrate) self.command_manager.add_command('net-report', cmd.NetworkReport) def build_option_parser(self, description, version): parser = super(NsxManage, self).build_option_parser( description, version) return parser def initialize_app(self, argv): super(NsxManage, self).initialize_app(argv) self.client = self.client_manager.neutron def main(): return NsxManage(shell.NEUTRON_API_VERSION).run(sys.argv[1:]) vmware-nsx-12.0.1/vmware_nsx/shell/admin/0000775000175100017510000000000013244524600020316 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/README.rst0000666000175100017510000001503713244523345022022 0ustar zuulzuul00000000000000Admin Utility ============= Introduction ------------ Purpose of this script is to build a framework which can be leveraged to build utilities to help the on-field ops in system debugging. Adding custom functions ----------------------- Refer to the security groups example for reference implementation under, admin/plugins/nsx_v3/resources/securitygroups.py Adding new functions is fairly straightforward: * Define the function under appropriate package. We use neutron callbacks to provide hooks. 
So your function definition should be like, :: def function(resource, event, trigger, **kwargs) * Add the Resources and Operations enums if they don't exist. :: class Operations(object): NEUTRON_CLEAN = 'neutron_clean' :: nsxv3_resources = { constants.SECURITY_GROUPS: Resource(constants.SECURITY_GROUPS, ops) } * In resource.py, add the function to the callback registry. :: registry.subscribe(neutron_clean_security_groups, Resources.SECURITY_GROUPS.value, Operations.NEUTRON_CLEAN.value) * To test, do :: cd vmware-nsx/shell sudo pip install -e . nsxadmin -r -o TODO ---- * Use Cliff * Auto complete command line args. Directory Structure ------------------- admin/ plugins/ common/ Contains code specific to different plugin versions. nsx_v3/ resources/ Contains modules for various resources supported by the admin utility. These modules contains methods to perform operations on these resources. Installation ------------ :: sudo pip install -e . Usage ----- :: nsxadmin -r -o Example ------- :: $ nsxadmin -r security-groups -o list ==== [NSX] List Security Groups ==== Firewall Sections +------------------------------------------------+--------------------------------------+ | display_name | id | |------------------------------------------------+--------------------------------------| | default - 261343f8-4f35-4e57-9cc7-6c4fc7723b72 | 91a05fbd-054a-48b6-8e60-3b5d445be8c7 | | default - 823247b6-bdb3-47be-8bac-0d1114fc1ad7 | 78116d4a-de77-4a8f-b3e5-e76f458840ea | | OS default section for security-groups | 10a2fc6c-29c9-4d8d-ac2c-b24aafa15c79 | | Default Layer3 Section | e479e404-e712-4adb-879c-e432d510c056 | +------------------------------------------------+--------------------------------------+ Firewall NS Groups +------------------------------------------------+--------------------------------------+ | display_name | id | |------------------------------------------------+--------------------------------------| | NSGroup Container | c0b26e82-d49b-49f0-b68e-7449a59366e9 | | 
default - 261343f8-4f35-4e57-9cc7-6c4fc7723b72 | 2e5b5ca1-f687-4556-8130-9524b313474b | | default - 823247b6-bdb3-47be-8bac-0d1114fc1ad7 | b5cd9ae4-42b5-47a7-a1bf-9767ac62466e | +------------------------------------------------+--------------------------------------+ ==== [NEUTRON] List Security Groups Mappings ==== security-groups +---------+--------------------------------------+-----------------------------------------------------------+----------------------+ | name | id | section-uri | nsx-securitygroup-id | +---------+--------------------------------------+-----------------------------------------------------------+----------------------+ | default | f785c82a-5b28-42ac-aa0a-ad56720ccbbc | /api/4.0/firewall/globalroot-0/config/layer3sections/1006 | securitygroup-12 | +---------+--------------------------------------+-----------------------------------------------------------+----------------------+ $ nsxadmin -r security-groups -o list -f json ==== [NSX] List Security Groups ==== { "Firewall Sections": [ { "display_name": "default - 261343f8-4f35-4e57-9cc7-6c4fc7723b72", "id": "91a05fbd-054a-48b6-8e60-3b5d445be8c7" }, { "display_name": "default - 823247b6-bdb3-47be-8bac-0d1114fc1ad7", "id": "78116d4a-de77-4a8f-b3e5-e76f458840ea" }, { "display_name": "OS default section for security-groups", "id": "10a2fc6c-29c9-4d8d-ac2c-b24aafa15c79" }, { "display_name": "Default Layer3 Section", "id": "e479e404-e712-4adb-879c-e432d510c056" } ] } { "Firewall NS Groups": [ { "display_name": "NSGroup Container", "id": "c0b26e82-d49b-49f0-b68e-7449a59366e9" }, { "display_name": "default - 261343f8-4f35-4e57-9cc7-6c4fc7723b72", "id": "2e5b5ca1-f687-4556-8130-9524b313474b" }, { "display_name": "default - 823247b6-bdb3-47be-8bac-0d1114fc1ad7", "id": "b5cd9ae4-42b5-47a7-a1bf-9767ac62466e" } ] } ==== [NEUTRON] List Security Groups Mappings ==== security-groups { "security-groups": [ { "id": "f785c82a-5b28-42ac-aa0a-ad56720ccbbc", "name": "default", "nsx-securitygroup-id": 
"securitygroup-12", "section-uri": "/api/4.0/firewall/globalroot-0/config/layer3sections/1006" } } Upgrade Steps (Version 1.0.0 to Version 1.1.0) ---------------------------------------------- 1. Upgrade NSX backend from version 1.0.0 to version 1.1.0 2. Create a DHCP-Profile and a Metadata-Proxy in NSX backend 3. Stop Neutron 4. Install version 1.1.0 Neutron plugin 5. Run admin tools to migrate version 1.0.0 objects to version 1.1.0 objects * nsxadmin -r metadata-proxy -o nsx-update --property metadata_proxy_uuid= * nsxadmin -r dhcp-binding -o nsx-update --property dhcp_profile_uuid= 6. Start Neutron 7. Make sure /etc/nova/nova.conf has metadata_proxy_shared_secret = 8. Restart VMs or ifdown/ifup their network interface to get new DHCP options Help ---- :: $ nsxadmin --help vmware-nsx-12.0.1/vmware_nsx/shell/admin/version.py0000666000175100017510000000120713244523345022364 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
__version__ = '0.1' vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/0000775000175100017510000000000013244524600021777 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/0000775000175100017510000000000013244524600022775 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/resources/0000775000175100017510000000000013244524600025007 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/resources/utils.py0000666000175100017510000001431713244523413026532 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import time import mock from oslo_config import cfg from oslo_log import log as logging from neutron.db import common_db_mixin as common_db from neutron_lib import context as neutron_context from neutron_lib.plugins import directory from vmware_nsx.common import config from vmware_nsx.extensions import projectpluginmap from vmware_nsx import plugin from vmware_nsx.plugins.nsx_v.vshield import vcns from vmware_nsx.shell.admin.plugins.common import utils as admin_utils LOG = logging.getLogger(__name__) def get_nsxv_client(): return vcns.Vcns( address=cfg.CONF.nsxv.manager_uri, user=cfg.CONF.nsxv.user, password=cfg.CONF.nsxv.password, ca_file=cfg.CONF.nsxv.ca_file, insecure=cfg.CONF.nsxv.insecure) def get_plugin_filters(context): return admin_utils.get_plugin_filters( context, projectpluginmap.NsxPlugins.NSX_V) class NeutronDbClient(common_db.CommonDbMixin): def __init__(self): super(NeutronDbClient, self) self.context = neutron_context.get_admin_context() class NsxVPluginWrapper(plugin.NsxVPlugin): def __init__(self): config.register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones) self.context = neutron_context.get_admin_context() self.filters = get_plugin_filters(self.context) super(NsxVPluginWrapper, self).__init__() # Make this the core plugin directory.add_plugin('CORE', self) # finish the plugin initialization # (with md-proxy config, but without housekeeping) with mock.patch("vmware_nsx.plugins.common.housekeeper." 
"housekeeper.NsxvHousekeeper"): self.init_complete(0, 0, 0) def _start_rpc_listeners(self): pass def _extend_get_network_dict_provider(self, context, net): self._extend_network_dict_provider(context, net) # skip getting the Qos policy ID because get_object calls # plugin init again on admin-util environment def count_spawn_jobs(self): # check if there are any spawn jobs running return self.edge_manager._get_worker_pool().running() # Define enter & exit to be used in with statements def __enter__(self): return self def __exit__(self, type, value, traceback): """Wait until no more jobs are pending We want to wait until all spawn edge creation are done, or else the edges might be in PERNDING_CREATE state in the nsx DB """ if not self.count_spawn_jobs(): return LOG.warning("Waiting for plugin jobs to finish properly...") sleep_time = 1 print_time = 20 max_loop = 600 for print_index in range(1, max_loop): n_jobs = self.count_spawn_jobs() if n_jobs > 0: if (print_index % print_time) == 0: LOG.warning("Still Waiting on %(jobs)s " "job%(plural)s", {'jobs': n_jobs, 'plural': 's' if n_jobs > 1 else ''}) time.sleep(sleep_time) else: LOG.warning("Done.") return LOG.warning("Sorry. Waited for too long. 
Some jobs are still " "running.") def _update_filters(self, requested_filters): filters = self.filters.copy() if requested_filters: filters.update(requested_filters) return filters def get_networks(self, context, filters=None, fields=None): filters = self._update_filters(filters) return super(NsxVPluginWrapper, self).get_networks( context, filters=filters, fields=fields) def get_subnets(self, context, filters=None, fields=None): filters = self._update_filters(filters) return super(NsxVPluginWrapper, self).get_subnets( context, filters=filters, fields=fields) def get_ports(self, context, filters=None, fields=None): filters = self._update_filters(filters) return super(NsxVPluginWrapper, self).get_ports( self.context, filters=filters, fields=fields) def get_routers(self, context, filters=None, fields=None): filters = self._update_filters(filters) return super(NsxVPluginWrapper, self).get_routers( self.context, filters=filters, fields=fields) def get_nsxv_backend_edges(): """Get a list of all the backend edges and some of their attributes """ nsxv = get_nsxv_client() edges = nsxv.get_edges() backend_edges = [] for edge in edges: summary = edge.get('appliancesSummary') size = ha = None if summary: size = summary.get('applianceSize') deployed_vms = summary.get('numberOfDeployedVms', 1) ha = 'Enabled' if deployed_vms > 1 else 'Disabled' # get all the relevant backend information for this edge edge_data = { 'id': edge.get('id'), 'name': edge.get('name'), 'size': size, 'type': edge.get('edgeType'), 'ha': ha, } backend_edges.append(edge_data) return backend_edges def get_edge_syslog_info(edge_id): """Get syslog information for specific edge id""" nsxv = get_nsxv_client() syslog_info = nsxv.get_edge_syslog(edge_id)[1] if not syslog_info['enabled']: return 'Disabled' output = "" if 'protocol' in syslog_info: output += syslog_info['protocol'] if 'serverAddresses' in syslog_info: for server_address in syslog_info['serverAddresses']['ipAddress']: output += "\n" + server_address 
return output vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py0000666000175100017510000003522513244523345030007 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pprint import sys from neutron_lib import context as n_context from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.shell.admin.plugins.common import constants import vmware_nsx.shell.admin.plugins.common.utils as admin_utils import vmware_nsx.shell.admin.plugins.nsxv.resources.utils as utils import vmware_nsx.shell.resources as shell from neutron_lib.callbacks import registry from neutron_lib import exceptions as nl_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as nsxv_constants) from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.plugins.nsx_v.vshield import vcns_driver LOG = logging.getLogger(__name__) nsxv = utils.get_nsxv_client() neutron_db = utils.NeutronDbClient() def nsx_get_static_bindings_by_edge(edge_id): nsx_dhcp_static_bindings = set() try: nsx_dhcp_bindings = nsxv.query_dhcp_configuration(edge_id) except exceptions.ResourceNotFound: LOG.error("Edge %s was not found", edge_id) return # nsx_dhcp_bindings[0] contains response headers; # nsx_dhcp_bindings[1] contains response payload sbindings = 
nsx_dhcp_bindings[1].get('staticBindings').get( 'staticBindings') for binding in sbindings: nsx_dhcp_static_bindings.add( (edge_id, binding.get('macAddress').lower(), binding.get('bindingId').lower())) return nsx_dhcp_static_bindings def neutron_get_static_bindings_by_edge(edge_id): neutron_db_dhcp_bindings = set() for binding in nsxv_db.get_dhcp_static_bindings_by_edge( neutron_db.context.session, edge_id): neutron_db_dhcp_bindings.add( (binding.edge_id, binding.mac_address.lower(), binding.binding_id.lower())) return neutron_db_dhcp_bindings @admin_utils.output_header def list_missing_dhcp_bindings(resource, event, trigger, **kwargs): """List missing DHCP bindings from NSXv backend. Missing DHCP bindings are those that exist in Neutron DB; but are not present on corresponding NSXv Edge. """ for (edge_id, count) in nsxv_db.get_nsxv_dhcp_bindings_count_per_edge( neutron_db.context.session): LOG.info("%s", "=" * 60) LOG.info("For edge: %s", edge_id) nsx_dhcp_static_bindings = nsx_get_static_bindings_by_edge(edge_id) if nsx_dhcp_static_bindings is None: continue neutron_dhcp_static_bindings = \ neutron_get_static_bindings_by_edge(edge_id) LOG.info("# of DHCP bindings in Neutron DB: %s", len(neutron_dhcp_static_bindings)) LOG.info("# of DHCP bindings on NSXv backend: %s", len(nsx_dhcp_static_bindings)) missing = neutron_dhcp_static_bindings - nsx_dhcp_static_bindings if not missing: LOG.info("No missing DHCP bindings found.") LOG.info("Neutron DB and NSXv backend are in sync") else: LOG.info("Missing DHCP bindings:") LOG.info("%s", pprint.pformat(missing)) @admin_utils.output_header def nsx_update_dhcp_edge_binding(resource, event, trigger, **kwargs): """Resync DHCP bindings on NSXv Edge""" if not kwargs.get('property'): LOG.error("Need to specify edge-id parameter") return else: properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) edge_id = properties.get('edge-id') if not edge_id: LOG.error("Need to specify edge-id parameter") return 
LOG.info("Updating NSXv Edge: %s", edge_id) # Need to create a plugin object; so that we are able to # do neutron list-ports. with utils.NsxVPluginWrapper() as plugin: nsxv_manager = vcns_driver.VcnsDriver( edge_utils.NsxVCallbacks(plugin)) edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin) try: edge_manager.update_dhcp_service_config( neutron_db.context, edge_id) except exceptions.ResourceNotFound: LOG.error("Edge %s not found", edge_id) def delete_old_dhcp_edge(context, old_edge_id, bindings): LOG.info("Deleting the old DHCP edge: %s", old_edge_id) with locking.LockManager.get_lock(old_edge_id): # Delete from NSXv backend # Note - If we will not delete the router, but free it - it will be # immediately used as the new one, So it is better to delete it. try: nsxv.delete_edge(old_edge_id) except Exception as e: LOG.warning("Failed to delete the old edge %(id)s: %(e)s", {'id': old_edge_id, 'e': e}) # Continue the process anyway # The edge may have been already deleted at the backend try: # Remove bindings from Neutron DB nsxv_db.clean_edge_router_binding(context.session, old_edge_id) nsxv_db.clean_edge_vnic_binding(context.session, old_edge_id) except Exception as e: LOG.warning("Failed to delete the old edge %(id)s from the " "DB : %(e)s", {'id': old_edge_id, 'e': e}) def recreate_network_dhcp(context, plugin, edge_manager, old_edge_id, net_id): """Handle the DHCP edge recreation of a network """ LOG.info("Moving network %s to a new edge", net_id) # delete the old binding resource_id = (nsxv_constants.DHCP_EDGE_PREFIX + net_id)[:36] nsxv_db.delete_nsxv_router_binding(context.session, resource_id) # Delete the old static binding of the networks` compute ports port_filters = {'network_id': [net_id], 'device_owner': ['compute:None']} compute_ports = plugin.get_ports(context, filters=port_filters) if old_edge_id: for port in compute_ports: # Delete old binding from the DB nsxv_db.delete_edge_dhcp_static_binding(context.session, old_edge_id, 
port['mac_address']) # Go over all the subnets with DHCP net_filters = {'network_id': [net_id], 'enable_dhcp': [True]} subnets = plugin.get_subnets(context, filters=net_filters) for subnet in subnets: LOG.info("Moving subnet %s to a new edge", subnet['id']) # allocate / reuse the new dhcp edge new_resource_id = edge_manager.create_dhcp_edge_service( context, net_id, subnet) if new_resource_id: # also add fw rules and metadata, once for the new edge plugin._update_dhcp_service_new_edge(context, resource_id) # Update the ip of the dhcp port LOG.info("Creating network %s DHCP address group", net_id) address_groups = plugin._create_network_dhcp_address_group( context, net_id) plugin.edge_manager.update_dhcp_edge_service( context, net_id, address_groups=address_groups) # find out the id of the new edge: new_binding = nsxv_db.get_nsxv_router_binding( context.session, resource_id) if new_binding: LOG.info("Network %(net_id)s was moved to edge %(edge_id)s", {'net_id': net_id, 'edge_id': new_binding['edge_id']}) else: LOG.error("Network %(net_id)s was not moved to a new edge", {'net_id': net_id}) @admin_utils.output_header def nsx_recreate_dhcp_edge(resource, event, trigger, **kwargs): """Recreate a dhcp edge with all the networks on a new NSXv edge""" usage_msg = ("Need to specify edge-id or net-id parameter") if not kwargs.get('property'): LOG.error(usage_msg) return # input validation properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) old_edge_id = properties.get('edge-id') if not old_edge_id: # if the net-id property exist - recreate the edge for this network net_id = properties.get('net-id') if net_id: nsx_recreate_dhcp_edge_by_net_id(net_id) return LOG.error(usage_msg) return LOG.info("ReCreating NSXv Edge: %s", old_edge_id) context = n_context.get_admin_context() # verify that this is a DHCP edge bindings = nsxv_db.get_nsxv_router_bindings_by_edge( context.session, old_edge_id) if (not bindings or not bindings[0]['router_id'].startswith( 
nsxv_constants.DHCP_EDGE_PREFIX)): LOG.error("Edge %(edge_id)s is not a DHCP edge", {'edge_id': old_edge_id}) return # init the plugin and edge manager cfg.CONF.set_override('core_plugin', 'vmware_nsx.shell.admin.plugins.nsxv.resources' '.utils.NsxVPluginWrapper') with utils.NsxVPluginWrapper() as plugin: nsxv_manager = vcns_driver.VcnsDriver( edge_utils.NsxVCallbacks(plugin)) edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin) # find the networks bound to this DHCP edge networks_binding = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, old_edge_id) network_ids = [binding['network_id'] for binding in networks_binding] # Delete the old edge delete_old_dhcp_edge(context, old_edge_id, bindings) # Move all the networks to other (new or existing) edge for net_id in network_ids: recreate_network_dhcp(context, plugin, edge_manager, old_edge_id, net_id) def nsx_recreate_dhcp_edge_by_net_id(net_id): """Recreate a dhcp edge for a specific network without an edge""" LOG.info("ReCreating NSXv Edge for network: %s", net_id) context = n_context.get_admin_context() # init the plugin and edge manager cfg.CONF.set_override('core_plugin', 'vmware_nsx.shell.admin.plugins.nsxv.resources' '.utils.NsxVPluginWrapper') with utils.NsxVPluginWrapper() as plugin: nsxv_manager = vcns_driver.VcnsDriver(edge_utils.NsxVCallbacks(plugin)) edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin) # verify that there is no DHCP edge for this network at the moment resource_id = (nsxv_constants.DHCP_EDGE_PREFIX + net_id)[:36] router_binding = nsxv_db.get_nsxv_router_binding( context.session, resource_id) if router_binding: # make sure there is no real edge if router_binding['edge_id']: edge_id = router_binding['edge_id'] try: nsxv_manager.vcns.get_edge(edge_id) except exceptions.ResourceNotFound: # No edge on backend # prevent logger from logging this exception sys.exc_clear() LOG.info("Edge %s does not exist on the NSX", edge_id) else: LOG.warning("Network %(net_id)s already has a 
dhcp edge: " "%(edge_id)s", {'edge_id': edge_id, 'net_id': net_id}) return # delete this old entry nsxv_db.delete_nsxv_router_binding(context.session, resource_id) # Verify that the network exists on neutron try: plugin.get_network(context, net_id) except nl_exc.NetworkNotFound: LOG.error("Network %s does not exist", net_id) return recreate_network_dhcp(context, plugin, edge_manager, None, net_id) @admin_utils.output_header def nsx_redistribute_dhcp_edges(resource, event, trigger, **kwargs): """If any of the DHCP networks are on a conflicting edge move them""" context = n_context.get_admin_context() with utils.NsxVPluginWrapper() as plugin: nsxv_manager = vcns_driver.VcnsDriver( edge_utils.NsxVCallbacks(plugin)) edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin) # go over all DHCP subnets networks = plugin.get_networks(context) for network in networks: network_id = network['id'] # Check if the network has a related DHCP edge resource_id = (nsxv_constants.DHCP_EDGE_PREFIX + network_id)[:36] dhcp_edge_binding = nsxv_db.get_nsxv_router_binding( context.session, resource_id) if not dhcp_edge_binding: continue LOG.info("Checking network %s", network_id) edge_id = dhcp_edge_binding['edge_id'] availability_zone = plugin.get_network_az_by_net_id( context, network['id']) filters = {'network_id': [network_id], 'enable_dhcp': [True]} subnets = plugin.get_subnets(context, filters=filters) for subnet in subnets: (conflict_edge_ids, available_edge_ids) = edge_manager._get_used_edges( context, subnet, availability_zone) if edge_id in conflict_edge_ids: # move the DHCP to another edge LOG.info("Network %(net)s on DHCP edge %(edge)s is " "conflicting with another network and will be " "moved", {'net': network_id, 'edge': edge_id}) edge_manager.remove_network_from_dhcp_edge( context, network_id, edge_id) edge_manager.create_dhcp_edge_service( context, network_id, subnet) break registry.subscribe(list_missing_dhcp_bindings, constants.DHCP_BINDING, 
shell.Operations.LIST.value) registry.subscribe(nsx_update_dhcp_edge_binding, constants.DHCP_BINDING, shell.Operations.NSX_UPDATE.value) registry.subscribe(nsx_recreate_dhcp_edge, constants.DHCP_BINDING, shell.Operations.NSX_RECREATE.value) registry.subscribe(nsx_redistribute_dhcp_edges, constants.DHCP_BINDING, shell.Operations.NSX_REDISTRIBURE.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/resources/spoofguard_policy.py0000666000175100017510000001304113244523345031117 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters import vmware_nsx.shell.admin.plugins.common.utils as admin_utils import vmware_nsx.shell.admin.plugins.nsxv.resources.utils as utils import vmware_nsx.shell.resources as shell from neutron_lib.callbacks import registry from neutron_lib import exceptions from vmware_nsx.db import nsxv_db from oslo_log import log as logging LOG = logging.getLogger(__name__) nsxv = utils.get_nsxv_client() def get_spoofguard_policies(): nsxv = utils.get_nsxv_client() return nsxv.get_spoofguard_policies()[1].get("policies") @admin_utils.output_header def nsx_list_spoofguard_policies(resource, event, trigger, **kwargs): """List spoofguard policies from NSXv backend""" policies = get_spoofguard_policies() LOG.info(formatters.output_formatter(constants.SPOOFGUARD_POLICY, policies, ['policyId', 'name'])) def get_spoofguard_policy_network_mappings(): spgapi = utils.NeutronDbClient() return nsxv_db.get_nsxv_spoofguard_policy_network_mappings( spgapi.context) @admin_utils.output_header def neutron_list_spoofguard_policy_mappings(resource, event, trigger, **kwargs): mappings = get_spoofguard_policy_network_mappings() LOG.info(formatters.output_formatter(constants.SPOOFGUARD_POLICY, mappings, ['network_id', 'policy_id'])) def get_missing_spoofguard_policy_mappings(reverse=None): nsxv_spoofguard_policies = set() for spg in get_spoofguard_policies(): nsxv_spoofguard_policies.add(spg.get('policyId')) neutron_spoofguard_policy_mappings = set() for binding in get_spoofguard_policy_network_mappings(): neutron_spoofguard_policy_mappings.add(binding.policy_id) if reverse: return nsxv_spoofguard_policies - neutron_spoofguard_policy_mappings else: return neutron_spoofguard_policy_mappings - nsxv_spoofguard_policies @admin_utils.output_header def nsx_list_missing_spoofguard_policies(resource, event, trigger, **kwargs): """List missing spoofguard policies on NSXv. 
Spoofguard policies that have a binding in Neutron Db but there is no policy on NSXv backend to back it. """ props = kwargs.get('property') reverse = True if props and props[0] == 'reverse' else False if reverse: LOG.info("Spoofguard policies on NSXv but not present in " "Neutron Db") else: LOG.info("Spoofguard policies in Neutron Db but not present " "on NSXv") missing_policies = get_missing_spoofguard_policy_mappings(reverse) if not missing_policies: LOG.info("\nNo missing spoofguard policies found." "\nNeutron DB and NSXv backend are in sync\n") else: LOG.info(missing_policies) missing_policies = [{'policy_id': pid} for pid in missing_policies] LOG.info(formatters.output_formatter( constants.SPOOFGUARD_POLICY, missing_policies, ['policy_id'])) def nsx_clean_spoofguard_policy(resource, event, trigger, **kwargs): """Delete spoofguard policy""" errmsg = ("Need to specify policy-id. Add --property " "policy-id=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) policy_id = properties.get('policy-id') if not policy_id: LOG.error("%s", errmsg) return try: h, c = nsxv.get_spoofguard_policy(policy_id) except exceptions.NeutronException as e: LOG.error("Unable to retrieve policy %(p)s: %(e)s", {'p': policy_id, 'e': str(e)}) else: if not c.get('spoofguardList'): LOG.error("Policy %s does not exist", policy_id) return confirm = admin_utils.query_yes_no( "Do you want to delete spoofguard-policy: %s" % policy_id, default="no") if not confirm: LOG.info("spoofguard-policy deletion aborted by user") return try: nsxv.delete_spoofguard_policy(policy_id) except Exception as e: LOG.error("%s", str(e)) LOG.info('spoofguard-policy successfully deleted.') registry.subscribe(neutron_list_spoofguard_policy_mappings, constants.SPOOFGUARD_POLICY, shell.Operations.LIST.value) registry.subscribe(nsx_list_spoofguard_policies, constants.SPOOFGUARD_POLICY, shell.Operations.LIST.value) 
registry.subscribe(nsx_list_missing_spoofguard_policies, constants.SPOOFGUARD_POLICY, shell.Operations.LIST.value) registry.subscribe(nsx_clean_spoofguard_policy, constants.SPOOFGUARD_POLICY, shell.Operations.CLEAN.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/resources/backup_edges.py0000666000175100017510000003202613244523345030007 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.db import l3_db from neutron_lib.callbacks import registry from neutron_lib import exceptions from oslo_log import log as logging from oslo_utils import uuidutils from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.db import nsxv_models from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_const from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters import vmware_nsx.shell.admin.plugins.common.utils as admin_utils import vmware_nsx.shell.admin.plugins.nsxv.resources.utils as utils import vmware_nsx.shell.resources as shell LOG = logging.getLogger(__name__) nsxv = utils.get_nsxv_client() _uuid = uuidutils.generate_uuid def get_nsxv_backup_edges(): edges = utils.get_nsxv_backend_edges() backup_edges = [] edgeapi = utils.NeutronDbClient() for edge in edges: if 
edge['name'].startswith("backup-"): # Make sure it is really a backup edge edge_vnic_binds = nsxv_db.get_edge_vnic_bindings_by_edge( edgeapi.context.session, edge['id']) if not edge_vnic_binds: extend_edge_info(edge) backup_edges.append(edge) return backup_edges def extend_edge_info(edge): """Add information from the nsxv-db, if available""" edgeapi = utils.NeutronDbClient() rtr_binding = nsxv_db.get_nsxv_router_binding_by_edge( edgeapi.context.session, edge['id']) if rtr_binding: edge['availability_zone'] = rtr_binding['availability_zone'] edge['db_status'] = rtr_binding['status'] @admin_utils.output_header def nsx_list_backup_edges(resource, event, trigger, **kwargs): """List backup edges""" backup_edges = get_nsxv_backup_edges() LOG.info(formatters.output_formatter( constants.BACKUP_EDGES, backup_edges, ['id', 'name', 'size', 'type', 'availability_zone', 'db_status'])) def _delete_backup_from_neutron_db(edge_id, router_id): # Remove bindings from Neutron DB edgeapi = utils.NeutronDbClient() nsxv_db.delete_nsxv_router_binding( edgeapi.context.session, router_id) if edge_id: nsxv_db.clean_edge_vnic_binding(edgeapi.context.session, edge_id) def _delete_edge_from_nsx_and_neutron(edge_id, router_id): try: with locking.LockManager.get_lock(edge_id): # Delete from NSXv backend nsxv.delete_edge(edge_id) # Remove bindings from Neutron DB _delete_backup_from_neutron_db(edge_id, router_id) return True except Exception as expt: LOG.error("%s", str(expt)) return False def _nsx_delete_backup_edge(edge_id, all_backup_edges): """Delete a specific backup edge""" try: edge_result = nsxv.get_edge(edge_id) except exceptions.NeutronException as x: LOG.error("%s", str(x)) else: # edge_result[0] is response status code # edge_result[1] is response body edge = edge_result[1] backup_edges = [e['id'] for e in all_backup_edges] if (not edge['name'].startswith('backup-') or edge['id'] not in backup_edges): LOG.error( 'Edge: %s is not a backup edge; aborting delete', edge_id) else: return 
_delete_edge_from_nsx_and_neutron(edge_id, edge['name']) def nsx_clean_backup_edge(resource, event, trigger, **kwargs): """Delete backup edge""" errmsg = ("Need to specify edge-id property. Add --property " "edge-id=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) edge_id = properties.get('edge-id') if not edge_id: LOG.error("%s", errmsg) return #ask for the user confirmation confirm = admin_utils.query_yes_no( "Do you want to delete edge: %s" % edge_id, default="no") if not confirm: LOG.info("Backup edge deletion aborted by user") return # delete the backup edge _nsx_delete_backup_edge(edge_id, get_nsxv_backup_edges()) def nsx_clean_all_backup_edges(resource, event, trigger, **kwargs): """Delete all backup edges""" backup_edges = get_nsxv_backup_edges() #ask for the user confirmation confirm = admin_utils.query_yes_no( "Do you want to delete %s backup edges?" % len(backup_edges), default="no") if not confirm: LOG.info("Backup edges deletion aborted by user") return deleted_cnt = 0 for edge in backup_edges: # delete the backup edge if _nsx_delete_backup_edge(edge['id'], backup_edges): deleted_cnt = deleted_cnt + 1 LOG.info('Done Deleting %s backup edges', deleted_cnt) @admin_utils.output_header def neutron_clean_backup_edge(resource, event, trigger, **kwargs): """Delete a backup edge from the neutron, and backend by it's name The name of the backup edge is the router-id column in the BD table nsxv_router_bindings, and it is also printed by list-mismatches """ errmsg = ("Need to specify router-id property. 
Add --property " "router-id=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) router_id = properties.get('router-id') if not router_id: LOG.error("%s", errmsg) return # look for the router-binding entry edgeapi = utils.NeutronDbClient() rtr_binding = nsxv_db.get_nsxv_router_binding( edgeapi.context.session, router_id) if not rtr_binding: LOG.error('Backup %s was not found in DB', router_id) return edge_id = rtr_binding['edge_id'] if edge_id: # delete from backend too _delete_edge_from_nsx_and_neutron(edge_id, router_id) else: # delete only from DB _delete_backup_from_neutron_db(None, router_id) @admin_utils.output_header def nsx_list_name_mismatches(resource, event, trigger, **kwargs): edges = utils.get_nsxv_backend_edges() plugin_nsx_mismatch = [] backend_edge_ids = [] edgeapi = utils.NeutronDbClient() # Look for edges with the wrong names: for edge in edges: backend_edge_ids.append(edge['id']) rtr_binding = nsxv_db.get_nsxv_router_binding_by_edge( edgeapi.context.session, edge['id']) if (rtr_binding and edge['name'].startswith('backup-') and rtr_binding['router_id'] != edge['name']): plugin_nsx_mismatch.append( {'edge_id': edge['id'], 'edge_name': edge['name'], 'router_id': rtr_binding['router_id']}) LOG.info(formatters.output_formatter( constants.BACKUP_EDGES + ' with name mismatch:', plugin_nsx_mismatch, ['edge_id', 'edge_name', 'router_id'])) # Also look for missing edges like_filters = {'router_id': vcns_const.BACKUP_ROUTER_PREFIX + "%"} rtr_bindings = nsxv_db.get_nsxv_router_bindings(edgeapi.context.session, like_filters=like_filters) plugin_nsx_missing = [] for rtr_binding in rtr_bindings: if rtr_binding['edge_id'] not in backend_edge_ids: plugin_nsx_missing.append( {'edge_id': rtr_binding['edge_id'], 'router_id': rtr_binding['router_id'], 'db_status': rtr_binding['status']}) LOG.info(formatters.output_formatter( constants.BACKUP_EDGES + ' missing from backend:', 
plugin_nsx_missing, ['edge_id', 'router_id', 'db_status'])) def nsx_fix_name_mismatch(resource, event, trigger, **kwargs): errmsg = ("Need to specify edge-id property. Add --property " "edge-id=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) edgeapi = utils.NeutronDbClient() edge_id = properties.get('edge-id') if not edge_id: LOG.error("%s", errmsg) return try: # edge[0] is response status code # edge[1] is response body edge = nsxv.get_edge(edge_id)[1] except exceptions.NeutronException as e: LOG.error("%s", str(e)) else: if edge['name'].startswith('backup-'): rtr_binding = nsxv_db.get_nsxv_router_binding_by_edge( edgeapi.context.session, edge['id']) if rtr_binding['router_id'] == edge['name']: LOG.error('Edge %s no mismatch with NSX', edge_id) return try: with locking.LockManager.get_lock(edge_id): # Update edge at NSXv backend if rtr_binding['router_id'].startswith('dhcp-'): # Edge is a DHCP edge - just use router_id as name edge['name'] = rtr_binding['router_id'] else: # This is a router - if shared, prefix with 'shared-' nsx_attr = (edgeapi.context.session.query( nsxv_models.NsxvRouterExtAttributes).filter_by( router_id=rtr_binding['router_id']).first()) if nsx_attr and nsx_attr['router_type'] == 'shared': edge['name'] = ('shared-' + _uuid())[ :vcns_const.EDGE_NAME_LEN] elif (nsx_attr and nsx_attr['router_type'] == 'exclusive'): rtr_db = (edgeapi.context.session.query( l3_db.Router).filter_by( id=rtr_binding['router_id']).first()) if rtr_db: edge['name'] = ( rtr_db['name'][ :nsxv_constants.ROUTER_NAME_LENGTH - len(rtr_db['id'])] + '-' + rtr_db['id']) else: LOG.error( 'No database entry for router id %s', rtr_binding['router_id']) else: LOG.error( 'Could not determine the name for ' 'Edge %s', edge_id) return confirm = admin_utils.query_yes_no( "Do you want to rename edge %s to %s" % (edge_id, edge['name']), default="no") if not confirm: LOG.info("Edge rename aborted by user") 
return LOG.info("Edge rename started") # remove some keys that will fail the NSX transaction edge_utils.remove_irrelevant_keys_from_edge_request(edge) try: LOG.error("Update edge...") nsxv.update_edge(edge_id, edge) except Exception as e: LOG.error("Update failed - %s", (e)) except Exception as e: LOG.error("%s", str(e)) else: LOG.error( 'Edge %s has no backup prefix on NSX', edge_id) return registry.subscribe(nsx_list_backup_edges, constants.BACKUP_EDGES, shell.Operations.LIST.value) registry.subscribe(nsx_clean_backup_edge, constants.BACKUP_EDGES, shell.Operations.CLEAN.value) registry.subscribe(nsx_clean_all_backup_edges, constants.BACKUP_EDGES, shell.Operations.CLEAN_ALL.value) registry.subscribe(nsx_list_name_mismatches, constants.BACKUP_EDGES, shell.Operations.LIST_MISMATCHES.value) registry.subscribe(nsx_fix_name_mismatch, constants.BACKUP_EDGES, shell.Operations.FIX_MISMATCH.value) registry.subscribe(neutron_clean_backup_edge, constants.BACKUP_EDGES, shell.Operations.NEUTRON_CLEAN.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/resources/config.py0000666000175100017510000000325013244523345026635 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.callbacks import registry from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) @admin_utils.output_header def validate_configuration(resource, event, trigger, **kwargs): """Validate the nsxv configuration""" try: utils.NsxVPluginWrapper() except exceptions.Forbidden: LOG.error("Configuration validation failed: wrong VSM credentials " "for %s", cfg.CONF.nsxv.manager_uri) except Exception as e: LOG.error("Configuration validation failed: %s", e) else: LOG.info("Configuration validation succeeded") registry.subscribe(validate_configuration, constants.CONFIG, shell.Operations.VALIDATE.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/resources/__init__.py0000666000175100017510000000000013244523345027115 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/resources/routers.py0000666000175100017510000003606713244523345027107 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters import vmware_nsx.shell.admin.plugins.common.utils as admin_utils import vmware_nsx.shell.admin.plugins.nsxv.resources.utils as utils import vmware_nsx.shell.resources as shell from neutron_lib.callbacks import registry from neutron_lib import context as n_context from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import routersize from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.plugins.nsx_v.vshield import vcns_driver LOG = logging.getLogger(__name__) def delete_old_edge(context, old_edge_id): LOG.info("Deleting the old edge: %s", old_edge_id) # clean it up from the DB nsxv_db.clean_edge_router_binding(context.session, old_edge_id) nsxv_db.clean_edge_vnic_binding(context.session, old_edge_id) nsxv_db.cleanup_nsxv_edge_firewallrule_binding(context.session, old_edge_id) with locking.LockManager.get_lock(old_edge_id): # Delete from NSXv backend # Note - If we will not delete the edge, but free it - it will be # immediately used as the new one, So it is better to delete it. try: nsxv = utils.get_nsxv_client() nsxv.delete_edge(old_edge_id) except Exception as e: LOG.warning("Failed to delete the old edge %(id)s: %(e)s", {'id': old_edge_id, 'e': e}) # Continue the process anyway # The edge may have been already deleted at the backend def _get_router_az_from_plugin_router(router): # If the router edge was already deployed the availability_zones will # return the az az_name = router.get('availability_zones', [''])[0] if not az_name: # If it was not deployed - it may be in the creation hints az_name = router.get('availability_zones_hints', [''])[0] if not az_name: # If not - the default az was used. 
az_name = nsx_az.DEFAULT_NAME return az_name def nsx_recreate_router_edge(old_edge_id): # init the plugin and edge manager cfg.CONF.set_override('core_plugin', 'vmware_nsx.shell.admin.plugins.nsxv.resources' '.utils.NsxVPluginWrapper') with utils.NsxVPluginWrapper() as plugin: nsxv_manager = vcns_driver.VcnsDriver( edge_utils.NsxVCallbacks(plugin)) edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin) context = n_context.get_admin_context() # verify that this is a Router edge router_ids = edge_manager.get_routers_on_edge(context, old_edge_id) if not router_ids: LOG.error("Edge %(edge_id)s is not a router edge", {'edge_id': old_edge_id}) return # all the routers on the same edge have the same type, so it # is ok to check the type once example_router = plugin.get_router(context, router_ids[0]) if example_router.get('distributed'): LOG.error("Recreating a distributed router edge is not " "supported") return router_driver = plugin._router_managers.get_tenant_router_driver( context, example_router['router_type']) # load all the routers before deleting their binding routers = [] for router_id in router_ids: routers.append(plugin.get_router(context, router_id)) # delete the backend edge and all the relevant DB entries delete_old_edge(context, old_edge_id) # Go over all the relevant routers for router in routers: router_id = router['id'] az_name = _get_router_az_from_plugin_router(router) # clean up other objects related to this router if plugin.metadata_proxy_handler: md_proxy = plugin.get_metadata_proxy_handler(az_name) md_proxy.cleanup_router_edge(context, router_id) # attach the router to a new edge appliance_size = router.get(routersize.ROUTER_SIZE) router_driver.attach_router(context, router_id, {'router': router}, appliance_size=appliance_size) # find out who is the new edge to print it new_edge_id = router_driver._get_edge_id_or_raise( context, router_id) LOG.info("Router %(router)s was attached to edge %(edge)s", {'router': router_id, 'edge': new_edge_id}) 
def nsx_recreate_router(router_id): # init the plugin and edge manager cfg.CONF.set_override('core_plugin', 'vmware_nsx.shell.admin.plugins.nsxv.resources' '.utils.NsxVPluginWrapper') with utils.NsxVPluginWrapper() as plugin: context = n_context.get_admin_context() router = plugin.get_router(context, router_id) if router.get('distributed'): LOG.error("Recreating a distributed router is not supported") return router_driver = plugin._router_managers.get_tenant_router_driver( context, router['router_type']) # Check if it is already attached to an edge binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if binding: old_edge_id = binding['edge_id'] # detach the router from this edge LOG.info("Detaching the router from edge %s", old_edge_id) router_driver.detach_router(context, router_id, {'router': router}) # attach the router to a new edge appliance_size = router.get(routersize.ROUTER_SIZE) router_driver.attach_router(context, router_id, {'router': router}, appliance_size=appliance_size) # find out who is the new edge to print it new_edge_id = router_driver._get_edge_id_or_raise( context, router_id) LOG.info("Router %(router)s was attached to edge %(edge)s", {'router': router_id, 'edge': new_edge_id}) @admin_utils.output_header def nsx_recreate_router_or_edge(resource, event, trigger, **kwargs): """Recreate a router edge with all the data on a new NSXv edge""" if not kwargs.get('property'): LOG.error("Need to specify edge-id or router-id parameter") return # input validation properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) old_edge_id = properties.get('edge-id') router_id = properties.get('router-id') if (not old_edge_id and not router_id) or (old_edge_id and router_id): LOG.error("Need to specify edge-id or router-id parameter") return if old_edge_id: LOG.info("ReCreating NSXv Router Edge: %s", old_edge_id) return nsx_recreate_router_edge(old_edge_id) else: LOG.info("ReCreating NSXv Router: %s", router_id) return 
nsx_recreate_router(router_id) @admin_utils.output_header def migrate_distributed_routers_dhcp(resource, event, trigger, **kwargs): context = n_context.get_admin_context() nsxv = utils.get_nsxv_client() with utils.NsxVPluginWrapper() as plugin: routers = plugin.get_routers(context) for router in routers: if router.get('distributed', False): binding = nsxv_db.get_nsxv_router_binding(context.session, router['id']) if binding: edge_id = binding['edge_id'] with locking.LockManager.get_lock(edge_id): route_obj = nsxv.get_routes(edge_id)[1] routes = route_obj.get('staticRoutes', {} ).get('staticRoutes', []) new_routes = [route for route in routes if route.get( 'network') != '169.254.169.254/32'] route_obj['staticRoutes']['staticRoutes'] = new_routes nsxv.update_routes(edge_id, route_obj) def is_router_conflicting_on_edge(context, driver, router_id): edge_id = edge_utils.get_router_edge_id(context, router_id) if not edge_id: return False (available_routers, conflict_routers) = driver._get_available_and_conflicting_ids( context, router_id) for conf_router in conflict_routers: conf_edge_id = edge_utils.get_router_edge_id(context, conf_router) if conf_edge_id == edge_id: LOG.info("Router %(rtr)s on edge %(edge)s is conflicting with " "another router and will be moved", {'rtr': router_id, 'edge': edge_id}) return True return False @admin_utils.output_header def redistribute_routers(resource, event, trigger, **kwargs): """If any of the shared routers are on a conflicting edge move them""" context = n_context.get_admin_context() with utils.NsxVPluginWrapper() as plugin: router_driver = plugin._router_managers.get_tenant_router_driver( context, 'shared') routers = plugin.get_routers(context) for router in routers: if (not router.get('distributed', False) and router.get('router_type') == 'shared' and is_router_conflicting_on_edge( context, router_driver, router['id'])): router_driver.detach_router(context, router['id'], router) router_driver.attach_router(context, router['id'], 
router) @admin_utils.output_header def list_orphaned_vnics(resource, event, trigger, **kwargs): """List router orphaned router vnics where the port was deleted""" orphaned_vnics = get_orphaned_vnics() if not orphaned_vnics: LOG.info("No orphaned router vnics found") return headers = ['edge_id', 'vnic_index', 'tunnel_index', 'network_id'] LOG.info(formatters.output_formatter(constants.ORPHANED_VNICS, orphaned_vnics, headers)) def get_orphaned_vnics(): orphaned_vnics = [] context = n_context.get_admin_context() vnic_binds = nsxv_db.get_edge_vnic_bindings_with_networks( context.session) with utils.NsxVPluginWrapper() as plugin: for vnic_bind in vnic_binds: edge_id = vnic_bind['edge_id'] # check if this is a router edge by the router bindings table router_bindings = nsxv_db.get_nsxv_router_bindings_by_edge( context.session, edge_id) if not router_bindings: # Only log it. this is a different type of orphaned LOG.warning("Router bindings for vnic %s not found", vnic_bind) continue router_ids = [b['router_id'] for b in router_bindings] routers = plugin.get_routers(context, filters={'id': router_ids}) if routers: interface_found = False # check if any of those routers is attached to this network for router in routers: if plugin._get_router_interface_ports_by_network( context, router['id'], vnic_bind['network_id']): interface_found = True break if not interface_found: # for later deleting the interface we need to know if this # is a distributed router. # All the routers on the same edge are of the same type, # so we can check the first one. 
vnic_bind['distributed'] = routers[0].get('distributed') orphaned_vnics.append(vnic_bind) return orphaned_vnics @admin_utils.output_header def clean_orphaned_vnics(resource, event, trigger, **kwargs): """List router orphaned router vnics where the port was deleted""" orphaned_vnics = get_orphaned_vnics() if not orphaned_vnics: LOG.info("No orphaned router vnics found") return headers = ['edge_id', 'vnic_index', 'tunnel_index', 'network_id'] LOG.info(formatters.output_formatter(constants.ORPHANED_VNICS, orphaned_vnics, headers)) user_confirm = admin_utils.query_yes_no("Do you want to delete " "orphaned vnics", default="no") if not user_confirm: LOG.info("NSXv vnics deletion aborted by user") return context = n_context.get_admin_context() with utils.NsxVPluginWrapper() as plugin: nsxv_manager = vcns_driver.VcnsDriver( edge_utils.NsxVCallbacks(plugin)) for vnic in orphaned_vnics: if not vnic['distributed']: try: nsxv_manager.vcns.delete_interface( vnic['edge_id'], vnic['vnic_index']) except Exception as e: LOG.error("Failed to delete vnic from NSX: %s", e) nsxv_db.free_edge_vnic_by_network( context.session, vnic['edge_id'], vnic['network_id']) else: try: nsxv_manager.vcns.delete_vdr_internal_interface( vnic['edge_id'], vnic['vnic_index']) except Exception as e: LOG.error("Failed to delete vnic from NSX: %s", e) nsxv_db.delete_edge_vnic_binding_by_network( context.session, vnic['edge_id'], vnic['network_id']) registry.subscribe(nsx_recreate_router_or_edge, constants.ROUTERS, shell.Operations.NSX_RECREATE.value) registry.subscribe(migrate_distributed_routers_dhcp, constants.ROUTERS, shell.Operations.MIGRATE_VDR_DHCP.value) registry.subscribe(redistribute_routers, constants.ROUTERS, shell.Operations.NSX_REDISTRIBURE.value) registry.subscribe(list_orphaned_vnics, constants.ORPHANED_VNICS, shell.Operations.NSX_LIST.value) registry.subscribe(clean_orphaned_vnics, constants.ORPHANED_VNICS, shell.Operations.NSX_CLEAN.value) 
vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/resources/edges.py0000666000175100017510000006121713244523345026466 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pprint import textwrap from vmware_nsx.common import config from vmware_nsx.dvs import dvs from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters import vmware_nsx.shell.admin.plugins.common.utils as admin_utils import vmware_nsx.shell.admin.plugins.nsxv.resources.utils as utils import vmware_nsx.shell.resources as shell from neutron_lib.callbacks import registry from neutron_lib import context as n_context from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) import vmware_nsx.plugins.nsx_v.vshield.common.exceptions as nsxv_exceptions LOG = logging.getLogger(__name__) nsxv = utils.get_nsxv_client() @admin_utils.output_header def nsx_list_edges(resource, event, trigger, **kwargs): """List edges from NSXv backend""" headers = ['id', 'name', 'type', 'size', 'ha'] edges = 
utils.get_nsxv_backend_edges() if (kwargs.get('verbose')): headers += ['syslog'] extend_edge_info(edges) LOG.info(formatters.output_formatter(constants.EDGES, edges, headers)) def extend_edge_info(edges): """Add syslog info to each edge in list""" for edge in edges: # for the table to remain human readable, we need to # wrap long edge names edge['name'] = textwrap.fill(edge['name'], 25) edge['syslog'] = utils.get_edge_syslog_info(edge['id']) def get_router_edge_bindings(): edgeapi = utils.NeutronDbClient() return nsxv_db.get_nsxv_router_bindings(edgeapi.context) @admin_utils.output_header def neutron_list_router_edge_bindings(resource, event, trigger, **kwargs): """List NSXv edges from Neutron DB""" edges = get_router_edge_bindings() LOG.info(formatters.output_formatter( constants.EDGES, edges, ['edge_id', 'router_id', 'availability_zone', 'status'])) @admin_utils.output_header def clean_orphaned_router_bindings(resource, event, trigger, **kwargs): """Delete nsx router bindings entries without real objects behind them""" orphaned_list = get_orphaned_router_bindings() if not len(orphaned_list): LOG.info("No orphaned Router bindings found.") return LOG.info("Before delete; Orphaned Bindings:") LOG.info(formatters.output_formatter( constants.ORPHANED_BINDINGS, orphaned_list, ['edge_id', 'router_id', 'availability_zone', 'status'])) if not kwargs.get('force'): if len(orphaned_list): user_confirm = admin_utils.query_yes_no("Do you want to delete " "orphaned bindings", default="no") if not user_confirm: LOG.info("NSXv Router bindings deletion aborted by user") return edgeapi = utils.NeutronDbClient() for binding in orphaned_list: nsxv_db.delete_nsxv_router_binding( edgeapi.context.session, binding.router_id) LOG.info("Deleted %s orphaned router bindings. 
You may need to check for " "orphaned edges now.", len(orphaned_list)) @admin_utils.output_header def list_orphaned_router_bindings(resource, event, trigger, **kwargs): """List nsx router bindings entries without real objects behind them""" orphaned_list = get_orphaned_router_bindings() LOG.info(formatters.output_formatter( constants.ORPHANED_BINDINGS, orphaned_list, ['edge_id', 'router_id', 'availability_zone', 'status'])) def get_orphaned_router_bindings(): context = n_context.get_admin_context() orphaned_list = [] with utils.NsxVPluginWrapper() as plugin: networks = plugin.get_networks(context, fields=['id']) net_ids = [x['id'] for x in networks] routers = plugin.get_routers(context, fields=['id']) rtr_ids = [x['id'] for x in routers] for binding in get_router_edge_bindings(): if not router_binding_obj_exist(context, binding, net_ids, rtr_ids): orphaned_list.append(binding) return orphaned_list def _get_obj_id_from_binding(router_id, prefix): """Return the id part of the router-binding router-id field""" return router_id[len(prefix):] def _is_id_prefix_in_list(id_prefix, ids): """Return True if the id_prefix is the prefix of one of the ids""" for x in ids: if x.startswith(id_prefix): return True return False def router_binding_obj_exist(context, binding, net_ids, rtr_ids): """Check if the object responsible for the router binding entry exists Check if the relevant router/network/loadbalancer exists in the neutron DB """ router_id = binding.router_id if router_id.startswith(vcns_const.BACKUP_ROUTER_PREFIX): # no neutron object that should match backup edges return True if router_id.startswith(vcns_const.DHCP_EDGE_PREFIX): # should have a network starting with this id # get the id. 
and look for a network with this id net_id_prefix = _get_obj_id_from_binding( router_id, vcns_const.DHCP_EDGE_PREFIX) if _is_id_prefix_in_list(net_id_prefix, net_ids): return True else: LOG.warning("Network for binding entry %s not found", router_id) return False if router_id.startswith(vcns_const.PLR_EDGE_PREFIX): # should have a distributed router starting with this id # get the id. and look for a network with this id rtr_id_prefix = _get_obj_id_from_binding( router_id, vcns_const.PLR_EDGE_PREFIX) if _is_id_prefix_in_list(rtr_id_prefix, rtr_ids): return True else: LOG.warning("Router for binding entry %s not found", router_id) return False if router_id.startswith(lb_common.RESOURCE_ID_PFX): # should have a load balancer starting with this id on the same edge if nsxv_db.get_nsxv_lbaas_loadbalancer_binding_by_edge( context.session, binding.edge_id): return True else: LOG.warning("Loadbalancer for binding entry %s not found", router_id) return False # regular router # get the id. and look for a router with this id if _is_id_prefix_in_list(router_id, rtr_ids): return True else: LOG.warning("Router for binding entry %s not found", router_id) return False def get_orphaned_edges(): nsxv_edge_ids = set() for edge in utils.get_nsxv_backend_edges(): nsxv_edge_ids.add(edge.get('id')) neutron_edge_bindings = set() for binding in get_router_edge_bindings(): neutron_edge_bindings.add(binding.edge_id) return nsxv_edge_ids - neutron_edge_bindings @admin_utils.output_header def nsx_list_orphaned_edges(resource, event, trigger, **kwargs): """List orphaned Edges on NSXv. Orphaned edges are NSXv edges that exist on NSXv backend but don't have a corresponding binding in Neutron DB """ LOG.info("NSXv edges present on NSXv backend but not present " "in Neutron DB\n") orphaned_edges = get_orphaned_edges() if not orphaned_edges: LOG.info("\nNo orphaned edges found." 
"\nNeutron DB and NSXv backend are in sync\n") else: LOG.info(constants.ORPHANED_EDGES) data = [('edge_id',)] for edge in orphaned_edges: data.append((edge,)) LOG.info(formatters.tabulate_results(data)) @admin_utils.output_header def nsx_delete_orphaned_edges(resource, event, trigger, **kwargs): """Delete orphaned edges from NSXv backend""" orphaned_edges = get_orphaned_edges() LOG.info("Before delete; Orphaned Edges: %s", orphaned_edges) if not kwargs.get('force'): if len(orphaned_edges): user_confirm = admin_utils.query_yes_no("Do you want to delete " "orphaned edges", default="no") if not user_confirm: LOG.info("NSXv Edge deletion aborted by user") return nsxv = utils.get_nsxv_client() for edge in orphaned_edges: LOG.info("Deleting edge: %s", edge) nsxv.delete_edge(edge) LOG.info("After delete; Orphaned Edges: \n%s", pprint.pformat(get_orphaned_edges())) def get_missing_edges(): nsxv_edge_ids = set() for edge in utils.get_nsxv_backend_edges(): nsxv_edge_ids.add(edge.get('id')) neutron_edge_bindings = set() for binding in get_router_edge_bindings(): neutron_edge_bindings.add(binding.edge_id) return neutron_edge_bindings - nsxv_edge_ids def get_router_edge_vnic_bindings(edge_id): edgeapi = utils.NeutronDbClient() return nsxv_db.get_edge_vnic_bindings_by_edge( edgeapi.context.session, edge_id) @admin_utils.output_header def nsx_list_missing_edges(resource, event, trigger, **kwargs): """List missing edges and networks serviced by those edges. Missing edges are NSXv edges that have a binding in Neutron DB but are currently missing from the NSXv backend. """ LOG.info("NSXv edges present in Neutron DB but not present " "on the NSXv backend\n") missing_edges = get_missing_edges() if not missing_edges: LOG.info("\nNo edges are missing." "\nNeutron DB and NSXv backend are in sync\n") else: data = [('edge_id', 'network_id')] for edge in missing_edges: # Retrieve all networks which are serviced by this edge. 
edge_serviced_networks = get_router_edge_vnic_bindings(edge) if not edge_serviced_networks: # If the edge is missing on the backend but no network # is serviced by this edge, output N/A. data.append((edge, 'N/A')) for bindings in edge_serviced_networks: data.append((edge, bindings.network_id)) LOG.info(formatters.tabulate_results(data)) def change_edge_ha(ha, edge_id): request = { 'featureType': 'highavailability_4.0', 'enabled': ha} try: nsxv.enable_ha(edge_id, request) except nsxv_exceptions.ResourceNotFound as e: LOG.error("Edge %s not found", edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) def change_edge_syslog(properties): request = { 'featureType': 'syslog', 'serverAddresses': {'ipAddress': [], 'type': 'IpAddressesDto'}} request['protocol'] = properties.get('syslog-proto', 'tcp') if request['protocol'] not in ['tcp', 'udp']: LOG.error("Property value error: syslog-proto must be tcp/udp") return if properties.get('syslog-server'): request['serverAddresses']['ipAddress'].append( properties.get('syslog-server')) if properties.get('syslog-server2'): request['serverAddresses']['ipAddress'].append( properties.get('syslog-server2')) edge_id = properties.get('edge-id') try: nsxv.update_edge_syslog(edge_id, request) except nsxv_exceptions.ResourceNotFound as e: LOG.error("Edge %s not found", edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) def delete_edge_syslog(edge_id): try: nsxv.delete_edge_syslog(edge_id) except nsxv_exceptions.ResourceNotFound as e: LOG.error("Edge %s not found", edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) def change_edge_loglevel(properties): """Update log level on edge Update log level either for specific module or for all modules. 
'none' disables logging, any other level enables logging Returns True if found any log level properties (regardless if action succeeded) """ modules = {} if properties.get('log-level'): level = properties.get('log-level') # change log level for all modules modules = {k: level for k in edge_utils.SUPPORTED_EDGE_LOG_MODULES} else: # check for log level settings for specific modules for k, v in properties.items(): if k.endswith('-log-level'): module = k[:-10] # module is in parameter prefix modules[module] = v if not modules: # no log level properties return False edge_id = properties.get('edge-id') for module, level in modules.items(): if level == 'none': LOG.info("Disabling logging for %s", module) else: LOG.info("Enabling logging for %(m)s with level %(l)s", {'m': module, 'l': level}) try: edge_utils.update_edge_loglevel(nsxv, edge_id, module, level) except nsxv_exceptions.ResourceNotFound as e: LOG.error("Edge %s not found", edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) # take ownership for properties return True def change_edge_appliance_size(properties): size = properties.get('size') if size not in vcns_const.ALLOWED_EDGE_SIZES: LOG.error("Edge appliance size not in %(size)s", {'size': vcns_const.ALLOWED_EDGE_SIZES}) return try: nsxv.change_edge_appliance_size( properties.get('edge-id'), size) except nsxv_exceptions.ResourceNotFound as e: LOG.error("Edge %s not found", properties.get('edge-id')) except exceptions.NeutronException as e: LOG.error("%s", str(e)) def _get_edge_az_and_size(edge_id): edgeapi = utils.NeutronDbClient() binding = nsxv_db.get_nsxv_router_binding_by_edge( edgeapi.context.session, edge_id) if binding: return binding['availability_zone'], binding['appliance_size'] # default fallback return nsx_az.DEFAULT_NAME, nsxv_constants.LARGE def change_edge_appliance(edge_id): """Update the appliances data of an edge Update the edge appliances data according to its current availability zone and the nsx.ini config, including 
the resource pool, edge_ha, datastore & ha_datastore. The availability zone of the edge will not be changed. This can be useful when the global resource pool/datastore/edge ha configuration is updated, or when the configuration of a specific availability zone was updated. """ # find out what is the current resource pool & size, so we can keep them az_name, size = _get_edge_az_and_size(edge_id) config.register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones) az = nsx_az.NsxVAvailabilityZones().get_availability_zone(az_name) appliances = [{'resourcePoolId': az.resource_pool, 'datastoreId': az.datastore_id}] if az.ha_datastore_id and az.edge_ha: appliances.append({'resourcePoolId': az.resource_pool, 'datastoreId': az.ha_datastore_id}) request = {'appliances': appliances, 'applianceSize': size} try: nsxv.change_edge_appliance(edge_id, request) except nsxv_exceptions.ResourceNotFound as e: LOG.error("Edge %s not found", edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) else: # also update the edge_ha of the edge change_edge_ha(az.edge_ha, edge_id) def change_edge_appliance_reservations(properties): reservations = {} res = {} if properties.get('limit'): res['limit'] = properties.get('limit') if properties.get('reservation'): res['reservation'] = properties.get('reservation') if properties.get('shares'): res['shares'] = properties.get('shares') resource = properties.get('resource') if not res: LOG.error("Please configure reservations") return if resource == 'cpu': reservations['cpuReservation'] = res elif resource == 'memory': reservations['memoryReservation'] = res else: LOG.error("Please configure resource") return edge_id = properties.get('edge-id') try: h, edge = nsxv.get_edge(edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) return appliances = edge['appliances']['appliances'] for appliance in appliances: appliance.update(reservations) request = {'appliances': appliances} try: nsxv.change_edge_appliance(edge_id, 
request) except nsxv_exceptions.ResourceNotFound as e: LOG.error("Edge %s not found", edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) def _update_host_group_for_edge(nsxv, cluster_mng, edge_id, edge): if edge.get('type') == 'gatewayServices': try: az_name, size = _get_edge_az_and_size(edge_id) config.register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones) zones = nsx_az.NsxVAvailabilityZones() az = zones.get_availability_zone(az_name) if az.edge_ha and az.edge_host_groups: edge_utils.update_edge_host_groups(nsxv, edge_id, cluster_mng, az, validate=True) else: LOG.error("%s does not have HA enabled or no host " "groups defined. Skipping %s.", az_name, edge_id) except Exception as e: LOG.error("Failed to update edge %(id)s - %(e)s", {'id': edge['id'], 'e': e}) else: LOG.error("%s is not a gateway services", edge_id) def change_edge_hostgroup(properties): cluster_mng = dvs.ClusterManager() if properties.get('hostgroup').lower() == "update": edge_id = properties.get('edge-id') try: edge_result = nsxv.get_edge(edge_id) except exceptions.NeutronException as x: LOG.error("%s", str(x)) else: # edge_result[0] is response status code # edge_result[1] is response body edge = edge_result[1] _update_host_group_for_edge(nsxv, cluster_mng, edge_id, edge) elif properties.get('hostgroup').lower() == "all": edges = utils.get_nsxv_backend_edges() for edge in edges: edge_id = edge['id'] _update_host_group_for_edge(nsxv, cluster_mng, edge_id, edge) elif properties.get('hostgroup').lower() == "clean": config.register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones) azs = nsx_az.NsxVAvailabilityZones() for az in azs.list_availability_zones_objects(): try: edge_utils.clean_host_groups(cluster_mng, az) except Exception: LOG.error("Failed to clean AZ %s", az.name) else: LOG.error('Currently not supported') @admin_utils.output_header def nsx_update_edge(resource, event, trigger, **kwargs): """Update edge properties""" usage_msg = ("Need to specify edge-id 
parameter and " "attribute to update. Add --property edge-id= " "and --property highavailability= or " "--property size= or --property appliances=True. " "\nFor syslog, add --property syslog-server=|none and " "(optional) --property syslog-server2= and/or " "(optional) --property syslog-proto=[tcp/udp] " "\nFor log levels, add --property [routing|dhcp|dns|" "highavailability|loadbalancer]-log-level=" "[debug|info|warning|error]. To set log level for all " "modules, add --property log-level= " "\nFor edge reservations, add " "--property resource=cpu|memory and " "(optional) --property limit= and/or " "(optional) --property shares= and/or " "(optional) --property reservation= " "\nFor hostgroup updates, add " "--property hostgroup=update/all/clean") if not kwargs.get('property'): LOG.error(usage_msg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) if (not properties.get('edge-id') and not properties.get('hostgroup', '').lower() == "all" and not properties.get('hostgroup', '').lower() == "clean"): LOG.error("Need to specify edge-id. 
" "Add --property edge-id=") return LOG.info("Updating NSXv edge: %(edge)s with properties\n%(prop)s", {'edge': properties.get('edge-id'), 'prop': properties}) if properties.get('highavailability'): change_edge_ha(properties['highavailability'].lower() == "true", properties['edge-id']) elif properties.get('size'): change_edge_appliance_size(properties) elif (properties.get('appliances') and properties.get('appliances').lower() == "true"): change_edge_appliance(properties['edge-id']) elif properties.get('syslog-server'): if (properties.get('syslog-server').lower() == "none"): delete_edge_syslog(properties['edge-id']) else: change_edge_syslog(properties) elif properties.get('resource'): change_edge_appliance_reservations(properties) elif properties.get('hostgroup'): change_edge_hostgroup(properties) elif change_edge_loglevel(properties): pass else: # no attribute was specified LOG.error(usage_msg) @admin_utils.output_header def nsx_update_edges(resource, event, trigger, **kwargs): """Update all edges with the given property""" if not kwargs.get('property'): usage_msg = ("Need to specify a property to update all edges. " "Add --property appliances=") LOG.error(usage_msg) return edges = utils.get_nsxv_backend_edges() properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) result = 0 for edge in edges: if properties.get('appliances', 'false').lower() == "true": try: change_edge_appliance(edge.get('edge-id')) except Exception as e: result += 1 LOG.error("Failed to update edge %(edge)s. 
Exception: " "%(e)s", {'edge': edge.get('edge-id'), 'e': str(e)}) if result > 0: total = len(edges) LOG.error("%(result)s of %(total)s edges failed " "to update.", {'result': result, 'total': total}) registry.subscribe(nsx_list_edges, constants.EDGES, shell.Operations.NSX_LIST.value) registry.subscribe(neutron_list_router_edge_bindings, constants.EDGES, shell.Operations.NEUTRON_LIST.value) registry.subscribe(nsx_list_orphaned_edges, constants.ORPHANED_EDGES, shell.Operations.LIST.value) registry.subscribe(nsx_delete_orphaned_edges, constants.ORPHANED_EDGES, shell.Operations.CLEAN.value) registry.subscribe(nsx_list_missing_edges, constants.MISSING_EDGES, shell.Operations.LIST.value) registry.subscribe(nsx_update_edge, constants.EDGES, shell.Operations.NSX_UPDATE.value) registry.subscribe(nsx_update_edges, constants.EDGES, shell.Operations.NSX_UPDATE_ALL.value) registry.subscribe(list_orphaned_router_bindings, constants.ORPHANED_BINDINGS, shell.Operations.LIST.value) registry.subscribe(clean_orphaned_router_bindings, constants.ORPHANED_BINDINGS, shell.Operations.CLEAN.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/resources/securitygroups.py0000666000175100017510000004367013244523345030511 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import xml.etree.ElementTree as et from neutron.db import api as db_api from neutron.db.models import securitygroup as sg_models from neutron.db import models_v2 from neutron.db import securitygroups_db from neutron.extensions import securitygroup as ext_sg from neutron_lib.callbacks import registry from neutron_lib import context as n_context from oslo_log import log as logging from vmware_nsx.common import utils as com_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.db import extended_security_group as extended_secgroup from vmware_nsx.db import extended_security_group_rule as extend_sg_rule from vmware_nsx.db import nsx_models from vmware_nsx.db import nsxv_db from vmware_nsx.db import nsxv_models from vmware_nsx.extensions import securitygrouppolicy as sg_policy from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) class NeutronSecurityGroupDB( utils.NeutronDbClient, securitygroups_db.SecurityGroupDbMixin, extended_secgroup.ExtendedSecurityGroupPropertiesMixin, extend_sg_rule.ExtendedSecurityGroupRuleMixin): def __init__(self): super(NeutronSecurityGroupDB, self) # FIXME(roeyc): context is already defined in NeutrondDbClient self.context = n_context.get_admin_context() def get_security_groups_mappings(self): q = self.context.session.query( sg_models.SecurityGroup.name, sg_models.SecurityGroup.id, nsxv_models.NsxvSecurityGroupSectionMapping.ip_section_id, nsx_models.NeutronNsxSecurityGroupMapping.nsx_id).join( nsxv_models.NsxvSecurityGroupSectionMapping, nsx_models.NeutronNsxSecurityGroupMapping).all() sg_mappings = [{'name': mapp.name, 'id': mapp.id, 'section-uri': mapp.ip_section_id, 'nsx-securitygroup-id': mapp.nsx_id} for mapp in q] return sg_mappings def 
get_security_group(self, sg_id): return super(NeutronSecurityGroupDB, self).get_security_group( self.context, sg_id) def get_security_groups(self): filters = utils.get_plugin_filters(self.context) return super(NeutronSecurityGroupDB, self).get_security_groups(self.context, filters=filters) def get_security_group_id_by_section_id(self, section_id): section_url = ("/api/4.0/firewall/globalroot-0/config/layer3sections" "/%s" % section_id) q = self.context.session.query( nsxv_models.NsxvSecurityGroupSectionMapping).filter_by( ip_section_id=section_url).all() if q: return q[0].neutron_id def _is_provider_section(self, section_id): # look for this section id in the nsx_db, and get the security group sg_id = self.get_security_group_id_by_section_id(section_id) if sg_id: # Check in the DB if this is a provider SG return self._is_provider_security_group(self.context, sg_id) return False def delete_security_group_section_mapping(self, sg_id): with db_api.context_manager.writer.using(self.context): fw_mapping = self.context.session.query( nsxv_models.NsxvSecurityGroupSectionMapping).filter_by( neutron_id=sg_id).one_or_none() if fw_mapping: self.context.session.delete(fw_mapping) def delete_security_group_backend_mapping(self, sg_id): with db_api.context_manager.writer.using(self.context): sg_mapping = self.context.session.query( nsx_models.NeutronNsxSecurityGroupMapping).filter_by( neutron_id=sg_id).one_or_none() if sg_mapping: self.context.session.delete(sg_mapping) def get_vnics_in_security_group(self, security_group_id): with utils.NsxVPluginWrapper() as plugin: vnics = [] query = self.context.session.query( models_v2.Port.id, models_v2.Port.device_id ).join(sg_models.SecurityGroupPortBinding).filter_by( security_group_id=security_group_id).all() for p in query: vnic_index = plugin._get_port_vnic_index(self.context, p.id) vnic_id = plugin._get_port_vnic_id(vnic_index, p.device_id) vnics.append(vnic_id) return vnics class NsxFirewallAPI(object): def __init__(self): 
self.vcns = utils.get_nsxv_client() def list_security_groups(self): h, secgroups = self.vcns.list_security_groups() if not secgroups: return [] root = et.fromstring(secgroups) secgroups = [] for sg in root.iter('securitygroup'): sg_id = sg.find('objectId').text # This specific security-group is not relevant to the plugin if sg_id == 'securitygroup-1': continue secgroups.append({'name': sg.find('name').text, 'id': sg_id}) return secgroups def list_fw_sections(self): h, firewall_config = self.vcns.get_dfw_config() if not firewall_config: return [] root = com_utils.normalize_xml(firewall_config) sections = [] for sec in root.iter('section'): sec_id = sec.attrib['id'] # Don't show NSX default sections, which are not relevant to OS. if sec_id in ['1001', '1002', '1003']: continue sections.append({'name': sec.attrib['name'], 'id': sec_id}) return sections def reorder_fw_sections(self): # read all the sections h, firewall_config = self.vcns.get_dfw_config() if not firewall_config: LOG.info("No firewall sections were found.") return root = com_utils.normalize_xml(firewall_config) for child in root: if str(child.tag) == 'layer3Sections': # go over the L3 sections and reorder them. # The correct order should be: # 1. OS provider security groups # 2. service composer policies # 3. 
regular OS security groups sections = list(child.iter('section')) provider_sections = [] regular_sections = [] policy_sections = [] for sec in sections: if sec.attrib.get('managedBy') == 'NSX Service Composer': policy_sections.append(sec) else: if neutron_sg._is_provider_section( sec.attrib.get('id')): provider_sections.append(sec) else: regular_sections.append(sec) child.remove(sec) if not policy_sections and not provider_sections: LOG.info("No need to reorder the firewall sections.") return # reorder the sections reordered_sections = (provider_sections + policy_sections + regular_sections) child.extend(reordered_sections) # update the new order of sections in the backend self.vcns.update_dfw_config(et.tostring(root), h) LOG.info("L3 Firewall sections were reordered.") neutron_sg = NeutronSecurityGroupDB() nsxv_firewall = NsxFirewallAPI() def _log_info(resource, data, attrs=['name', 'id']): LOG.info(formatters.output_formatter(resource, data, attrs)) @admin_utils.list_handler(constants.SECURITY_GROUPS) @admin_utils.output_header def neutron_list_security_groups_mappings(resource, event, trigger, **kwargs): sg_mappings = neutron_sg.get_security_groups_mappings() _log_info(constants.SECURITY_GROUPS, sg_mappings, attrs=['name', 'id', 'section-uri', 'nsx-securitygroup-id']) return bool(sg_mappings) @admin_utils.list_handler(constants.FIREWALL_SECTIONS) @admin_utils.output_header def nsx_list_dfw_sections(resource, event, trigger, **kwargs): fw_sections = nsxv_firewall.list_fw_sections() _log_info(constants.FIREWALL_SECTIONS, fw_sections) return bool(fw_sections) @admin_utils.list_handler(constants.FIREWALL_NSX_GROUPS) @admin_utils.output_header def nsx_list_security_groups(resource, event, trigger, **kwargs): nsx_secgroups = nsxv_firewall.list_security_groups() _log_info(constants.FIREWALL_NSX_GROUPS, nsx_secgroups) return bool(nsx_secgroups) def _find_missing_security_groups(): nsx_secgroups = nsxv_firewall.list_security_groups() sg_mappings = 
neutron_sg.get_security_groups_mappings() missing_secgroups = {} for sg_db in sg_mappings: for nsx_sg in nsx_secgroups: if nsx_sg['id'] == sg_db['nsx-securitygroup-id']: break else: missing_secgroups[sg_db['id']] = sg_db return missing_secgroups @admin_utils.list_mismatches_handler(constants.FIREWALL_NSX_GROUPS) @admin_utils.output_header def list_missing_security_groups(resource, event, trigger, **kwargs): sgs_with_missing_nsx_group = _find_missing_security_groups() missing_securitgroups_info = [ {'securitygroup-name': sg['name'], 'securitygroup-id': sg['id'], 'nsx-securitygroup-id': sg['nsx-securitygroup-id']} for sg in sgs_with_missing_nsx_group.values()] _log_info(constants.FIREWALL_NSX_GROUPS, missing_securitgroups_info, attrs=['securitygroup-name', 'securitygroup-id', 'nsx-securitygroup-id']) return bool(missing_securitgroups_info) def _find_missing_sections(): fw_sections = nsxv_firewall.list_fw_sections() sg_mappings = neutron_sg.get_security_groups_mappings() missing_sections = {} for sg_db in sg_mappings: for fw_section in fw_sections: if fw_section['id'] == sg_db.get('section-uri', '').split('/')[-1]: break else: missing_sections[sg_db['id']] = sg_db return missing_sections @admin_utils.list_mismatches_handler(constants.FIREWALL_SECTIONS) @admin_utils.output_header def list_missing_firewall_sections(resource, event, trigger, **kwargs): sgs_with_missing_section = _find_missing_sections() missing_sections_info = [{'securitygroup-name': sg['name'], 'securitygroup-id': sg['id'], 'section-id': sg['section-uri']} for sg in sgs_with_missing_section.values()] _log_info(constants.FIREWALL_SECTIONS, missing_sections_info, attrs=['securitygroup-name', 'securitygroup-id', 'section-uri']) return bool(missing_sections_info) @admin_utils.list_mismatches_handler(constants.FIREWALL_SECTIONS) @admin_utils.output_header def reorder_firewall_sections(resource, event, trigger, **kwargs): nsxv_firewall.reorder_fw_sections() 
@admin_utils.fix_mismatches_handler(constants.SECURITY_GROUPS) @admin_utils.output_header def fix_security_groups(resource, event, trigger, **kwargs): context_ = n_context.get_admin_context() sgs_with_missing_section = _find_missing_sections() sgs_with_missing_nsx_group = _find_missing_security_groups() if not sgs_with_missing_section and not sgs_with_missing_nsx_group: # no mismatches return with utils.NsxVPluginWrapper() as plugin: # If only the fw section is missing then create it. for sg_id in (set(sgs_with_missing_section.keys()) - set(sgs_with_missing_nsx_group.keys())): neutron_sg.delete_security_group_section_mapping(sg_id) secgroup = plugin.get_security_group(context_, sg_id) plugin._create_fw_section_for_security_group( context_, secgroup, sgs_with_missing_section[sg_id]['nsx-securitygroup-id']) # If nsx security-group is missing then create both nsx security-group # and a new fw section (remove old one). for sg_id, sg in sgs_with_missing_nsx_group.items(): secgroup = plugin.get_security_group(context_, sg_id) if sg_id not in sgs_with_missing_section: plugin._delete_section(sg['section-uri']) neutron_sg.delete_security_group_section_mapping(sg_id) neutron_sg.delete_security_group_backend_mapping(sg_id) plugin._process_security_group_create_backend_resources(context_, secgroup) nsx_id = nsx_db.get_nsx_security_group_id(context_.session, sg_id, moref=False) for vnic_id in neutron_sg.get_vnics_in_security_group(sg_id): plugin._add_member_to_security_group(nsx_id, vnic_id) @admin_utils.output_header def migrate_sg_to_policy(resource, event, trigger, **kwargs): """Change the mode of a security group from rules to NSX policy""" if not kwargs.get('property'): LOG.error("Need to specify security-group-id and policy-id " "parameters") return # input validation properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) sg_id = properties.get('security-group-id') if not sg_id: LOG.error("Need to specify security-group-id parameter") return policy_id = 
properties.get('policy-id') if not policy_id: LOG.error("Need to specify policy-id parameter") return # validate that the security group exist and contains rules and no policy context_ = n_context.get_admin_context() with utils.NsxVPluginWrapper() as plugin: try: secgroup = plugin.get_security_group(context_, sg_id) except ext_sg.SecurityGroupNotFound: LOG.error("Security group %s was not found", sg_id) return if secgroup.get('policy'): LOG.error("Security group %s already uses a policy", sg_id) return # validate that the policy exists if not plugin.nsx_v.vcns.validate_inventory(policy_id): LOG.error("NSX policy %s was not found", policy_id) return # get the nsx id from the backend nsx_sg_id = nsx_db.get_nsx_security_group_id(context_.session, sg_id, moref=True) if not nsx_sg_id: LOG.error("Did not find security groups %s neutron ID", sg_id) return # Delete the rules from the security group LOG.info("Deleting the rules of security group: %s", sg_id) for rule in secgroup.get('security_group_rules', []): try: plugin.delete_security_group_rule(context_, rule['id']) except Exception as e: LOG.warning("Failed to delete rule %(r)s from security " "group %(sg)s: %(e)s", {'r': rule['id'], 'sg': sg_id, 'e': e}) # continue anyway # Delete the security group FW section LOG.info("Deleting the section of security group: %s", sg_id) try: section_uri = plugin._get_section_uri(context_.session, sg_id) plugin._delete_section(section_uri) nsxv_db.delete_neutron_nsx_section_mapping( context_.session, sg_id) except Exception as e: LOG.warning("Failed to delete firewall section of security " "group %(sg)s: %(e)s", {'sg': sg_id, 'e': e}) # continue anyway # bind this security group to the policy in the backend and DB LOG.info("Binding the NSX security group %(nsx)s to policy " "%(pol)s", {'nsx': nsx_sg_id, 'pol': policy_id}) plugin._update_nsx_security_group_policies( policy_id, None, nsx_sg_id) with context_.session.begin(subtransactions=True): prop = context_.session.query( 
extended_secgroup.NsxExtendedSecurityGroupProperties).\ filter_by(security_group_id=sg_id).one() prop[sg_policy.POLICY] = policy_id LOG.info("Done.") @admin_utils.output_header def firewall_update_cluster_default_fw_section(resource, event, trigger, **kwargs): with utils.NsxVPluginWrapper() as plugin: plugin._create_cluster_default_fw_section() LOG.info("Cluster default FW section updated.") registry.subscribe(migrate_sg_to_policy, constants.SECURITY_GROUPS, shell.Operations.MIGRATE_TO_POLICY.value) registry.subscribe(reorder_firewall_sections, constants.FIREWALL_SECTIONS, shell.Operations.NSX_REORDER.value) registry.subscribe(fix_security_groups, constants.FIREWALL_SECTIONS, shell.Operations.NSX_UPDATE.value) registry.subscribe(firewall_update_cluster_default_fw_section, constants.FIREWALL_SECTIONS, shell.Operations.NSX_UPDATE.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/resources/metadata.py0000666000175100017510000002243213244523345027153 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import hashlib import hmac from neutron.db import models_v2 from neutron_lib.callbacks import registry from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import config from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v import md_proxy from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_constants from vmware_nsx.plugins.nsx_v.vshield import nsxv_loadbalancer as nsxv_lb from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) nsxv = utils.get_nsxv_client() @admin_utils.output_header def nsx_redo_metadata_cfg(resource, event, trigger, **kwargs): edgeapi = utils.NeutronDbClient() config.register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones) conf_az = nsx_az.NsxVAvailabilityZones() az_list = conf_az.list_availability_zones_objects() for az in az_list: if az.supports_metadata(): nsx_redo_metadata_cfg_for_az(az, edgeapi) else: LOG.info("Skipping availability zone: %s - no metadata " "configuration", az.name) def nsx_redo_metadata_cfg_for_az(az, edgeapi): LOG.info("Updating MetaData for availability zone: %s", az.name) # Get the list of internal networks for this AZ db_net = nsxv_db.get_nsxv_internal_network_for_az( edgeapi.context.session, vcns_constants.InternalEdgePurposes.INTER_EDGE_PURPOSE, az.name) internal_net = None internal_subnet = None if db_net: internal_net = db_net['network_id'] internal_subnet = edgeapi.context.session.query( models_v2.Subnet).filter_by( network_id=internal_net).first().get('id') # Get the list of internal edges for this AZ edge_list = 
nsxv_db.get_nsxv_internal_edges_by_purpose( edgeapi.context.session, vcns_constants.InternalEdgePurposes.INTER_EDGE_PURPOSE) edge_az_list = [edge for edge in edge_list if nsxv_db.get_router_availability_zone( edgeapi.context.session, edge['router_id']) == az.name] md_rtr_ids = [edge['router_id'] for edge in edge_az_list] edge_internal_ips = [] for edge in edge_az_list: edge_internal_port = edgeapi.context.session.query( models_v2.Port).filter_by(network_id=internal_net, device_id=edge['router_id']).first() if edge_internal_port: edge_internal_ip = edgeapi.context.session.query( models_v2.IPAllocation).filter_by( port_id=edge_internal_port['id']).first() edge_internal_ips.append(edge_internal_ip['ip_address']) if not internal_net or not internal_subnet or not edge_internal_ips: LOG.error("Metadata infrastructure is missing or broken. " "It is recommended to restart neutron service before " "proceeding with configuration restoration") return router_bindings = nsxv_db.get_nsxv_router_bindings( edgeapi.context.session, filters={'edge_type': [nsxv_constants.SERVICE_EDGE], 'availability_zones': az.name}) edge_ids = list(set([binding['edge_id'] for binding in router_bindings if (binding['router_id'] not in set(md_rtr_ids) and not binding['router_id'].startswith( vcns_constants.BACKUP_ROUTER_PREFIX) and not binding['router_id'].startswith( vcns_constants.PLR_EDGE_PREFIX))])) for edge_id in edge_ids: with locking.LockManager.get_lock(edge_id): lb = nsxv_lb.NsxvLoadbalancer.get_loadbalancer(nsxv, edge_id) virt = lb.virtual_servers.get(md_proxy.METADATA_VSE_NAME) if virt: pool = virt.default_pool pool.members = {} i = 0 s_port = cfg.CONF.nsxv.nova_metadata_port for member_ip in edge_internal_ips: i += 1 member = nsxv_lb.NsxvLBPoolMember( name='Member-%d' % i, ip_address=member_ip, port=s_port, monitor_port=s_port) pool.add_member(member) lb.submit_to_backend(nsxv, edge_id) @admin_utils.output_header def update_shared_secret(resource, event, trigger, **kwargs): edgeapi = 
utils.NeutronDbClient() edge_list = nsxv_db.get_nsxv_internal_edges_by_purpose( edgeapi.context.session, vcns_constants.InternalEdgePurposes.INTER_EDGE_PURPOSE) md_rtr_ids = [edge['router_id'] for edge in edge_list] router_bindings = nsxv_db.get_nsxv_router_bindings( edgeapi.context.session, filters={'edge_type': [nsxv_constants.SERVICE_EDGE]}) edge_ids = list(set([binding['edge_id'] for binding in router_bindings if (binding['router_id'] not in set(md_rtr_ids) and not binding['router_id'].startswith( vcns_constants.BACKUP_ROUTER_PREFIX) and not binding['router_id'].startswith( vcns_constants.PLR_EDGE_PREFIX))])) for edge_id in edge_ids: with locking.LockManager.get_lock(edge_id): lb = nsxv_lb.NsxvLoadbalancer.get_loadbalancer(nsxv, edge_id) virt = lb.virtual_servers.get(md_proxy.METADATA_VSE_NAME) if not virt: LOG.error("Virtual server not found for edge: %s", edge_id) continue virt.del_app_rule('insert-auth') if cfg.CONF.nsxv.metadata_shared_secret: signature = hmac.new(cfg.CONF.nsxv.metadata_shared_secret, edge_id, hashlib.sha256).hexdigest() sign = 'reqadd X-Metadata-Provider-Signature:' + signature sign_app_rule = nsxv_lb.NsxvLBAppRule('insert-auth', sign) virt.add_app_rule(sign_app_rule) lb.submit_to_backend(nsxv, edge_id) def _md_member_status(title, edge_ids): for edge_id in edge_ids: lb_stats = nsxv.get_loadbalancer_statistics( edge_id) pools_stats = lb_stats[1].get('pool', []) members = [] for pool_stats in pools_stats: if pool_stats['name'] == md_proxy.METADATA_POOL_NAME: for member in pool_stats.get('member', []): members.append({'member_ip': member['ipAddress'], 'member_status': member['status']}) LOG.info(formatters.output_formatter( title % edge_id, members, ['member_ip', 'member_status'])) @admin_utils.output_header def get_metadata_status(resource, event, trigger, **kwargs): if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) net_id = properties.get('network_id') else: net_id = None edgeapi = 
utils.NeutronDbClient() edge_list = nsxv_db.get_nsxv_internal_edges_by_purpose( edgeapi.context.session, vcns_constants.InternalEdgePurposes.INTER_EDGE_PURPOSE) md_rtr_ids = [edge['router_id'] for edge in edge_list] router_bindings = nsxv_db.get_nsxv_router_bindings( edgeapi.context.session, filters={'router_id': md_rtr_ids}) edge_ids = [b['edge_id'] for b in router_bindings] _md_member_status('Metadata edge appliance: %s members', edge_ids) if net_id: as_provider_data = nsxv_db.get_edge_vnic_bindings_by_int_lswitch( edgeapi.context.session, net_id) providers = [asp['edge_id'] for asp in as_provider_data] if providers: LOG.info('Metadata providers for network %s', net_id) _md_member_status('Edge %s', providers) else: LOG.info('No providers found for network %s', net_id) registry.subscribe(nsx_redo_metadata_cfg, constants.METADATA, shell.Operations.NSX_UPDATE.value) registry.subscribe(update_shared_secret, constants.METADATA, shell.Operations.NSX_UPDATE_SECRET.value) registry.subscribe(get_metadata_status, constants.METADATA, shell.Operations.STATUS.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/resources/gw_edges.py0000666000175100017510000003717313244523345027167 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from neutron_lib.callbacks import registry from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import config from vmware_nsx.common import nsxv_constants from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_const from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.plugins.nsx_v.vshield import vcns_driver from vmware_nsx.services.dynamic_routing.nsx_v import driver as nsxv_bgp from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as v_utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) MIN_ASNUM = 1 MAX_ASNUM = 65535 nsxv = vcns_driver.VcnsDriver([]) def get_ip_prefix(name, ip_address): return {'ipPrefix': {'name': name, 'ipAddress': ip_address}} def get_redistribution_rule(prefix_name, from_bgp, from_ospf, from_static, from_connected, action): rule = { 'action': action, 'from': { 'ospf': from_ospf, 'bgp': from_bgp, 'connected': from_connected, 'static': from_static } } if prefix_name: rule['prefixName'] = prefix_name return {'rule': rule} def _validate_asn(asn): if not MIN_ASNUM <= int(asn) <= MAX_ASNUM: msg = "Invalid AS number, expecting an integer value (1 - 65535)." 
LOG.error(msg) return False return True def _extract_interface_info(info): info = info.split(':') try: network = netaddr.IPNetwork(info[-1]) except Exception: LOG.error("Invalid IP address given: '%s'.", info) return None portgroup = info[0] subnet_mask = str(network.netmask) ip_address = str(network.ip) return portgroup, ip_address, subnet_mask def _assemble_gw_edge(name, size, external_iface_info, internal_iface_info, default_gateway, az): edge = nsxv._assemble_edge( name, datacenter_moid=az.datacenter_moid, deployment_container_id=az.datastore_id, appliance_size=size, remote_access=False, edge_ha=az.edge_ha) appliances = [nsxv._assemble_edge_appliance( az.resource_pool, az.datastore_id)] edge['appliances']['appliances'] = appliances portgroup, ip_address, subnet_mask = external_iface_info vnic_external = nsxv._assemble_edge_vnic(vcns_const.EXTERNAL_VNIC_NAME, vcns_const.EXTERNAL_VNIC_INDEX, portgroup, primary_address=ip_address, subnet_mask=subnet_mask, type="uplink") portgroup, gateway_ip, subnet_mask = internal_iface_info vnic_internal = nsxv._assemble_edge_vnic(vcns_const.INTERNAL_VNIC_NAME, vcns_const.INTERNAL_VNIC_INDEX, portgroup, primary_address=gateway_ip, subnet_mask=subnet_mask, type="internal") if (cfg.CONF.nsxv.edge_appliance_user and cfg.CONF.nsxv.edge_appliance_password): edge['cliSettings'].update({ 'userName': cfg.CONF.nsxv.edge_appliance_user, 'password': cfg.CONF.nsxv.edge_appliance_password}) edge['vnics']['vnics'].append(vnic_external) edge['vnics']['vnics'].append(vnic_internal) edge['featureConfigs']['features'] = [{'featureType': 'firewall_4.0', 'enabled': False}] if default_gateway: routing = {'featureType': 'routing_4.0', 'enabled': True, 'staticRouting': { 'defaultRoute': { 'description': 'default-gateway', 'gatewayAddress': default_gateway } }} edge['featureConfigs']['features'].append(routing) header = nsxv.vcns.deploy_edge(edge)[0] edge_id = header.get('location', '/').split('/')[-1] return edge_id, gateway_ip 
@admin_utils.output_header def create_bgp_gw(resource, event, trigger, **kwargs): """Creates a new BGP GW edge""" usage = ("nsxadmin -r bgp-gw-edge -o create " "--property name= " "--property local-as= " "--property external-iface=: " "--property internal-iface=: " "[--property default-gateway=] " "[--property az-hint=] " "[--property size=compact,large,xlarge,quadlarge]") required_params = ('name', 'local-as', 'internal-iface', 'external-iface') properties = admin_utils.parse_multi_keyval_opt(kwargs.get('property', [])) if not properties or not set(required_params) <= set(properties.keys()): LOG.error(usage) return local_as = properties['local-as'] if not _validate_asn(local_as): return size = properties.get('size', nsxv_constants.LARGE) if size not in vcns_const.ALLOWED_EDGE_SIZES: msg = ("Property 'size' takes one of the following values: %s." % ','.join(vcns_const.ALLOWED_EDGE_SIZES)) LOG.error(msg) return external_iface_info = _extract_interface_info(properties['external-iface']) internal_iface_info = _extract_interface_info(properties['internal-iface']) if not (external_iface_info and internal_iface_info): return if 'default-gateway' in properties: default_gw = _extract_interface_info(properties['default-gateway']) if not default_gw: msg = ("Property 'default-gateway' doesn't contain a valid IP " "address.") LOG.error(msg) return default_gw = default_gw[1] else: default_gw = None config.register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones) az_hint = properties.get('az-hint', 'default') az = nsx_az.NsxVAvailabilityZones().get_availability_zone(az_hint) edge_id, gateway_ip = _assemble_gw_edge(properties['name'], size, external_iface_info, internal_iface_info, default_gw, az) nsxv.add_bgp_speaker_config(edge_id, gateway_ip, local_as, True, [], [], [], default_originate=True) res = {'name': properties['name'], 'edge_id': edge_id, 'size': size, 'availability_zone': az.name, 'bgp_identifier': gateway_ip, 'local_as': local_as} headers = ['name', 'edge_id', 
'size', 'bgp_identifier', 'availability_zone', 'local_as'] LOG.info(formatters.output_formatter('BGP GW Edge', [res], headers)) def delete_bgp_gw(resource, event, trigger, **kwargs): usage = ("nsxadmin -r bgp-gw-edge -o delete " "--property gw-edge-id=") required_params = ('gw-edge-id', ) properties = admin_utils.parse_multi_keyval_opt(kwargs.get('property', [])) if not properties or not set(required_params) <= set(properties.keys()): LOG.error(usage) return edge_id = properties['gw-edge-id'] try: nsxv.vcns.delete_edge(edge_id) except Exception: LOG.error("Failed to delete edge %s", edge_id) return def list_bgp_edges(resource, event, trigger, **kwargs): bgp_edges = [] edges = v_utils.get_nsxv_backend_edges() for edge in edges: bgp_config = nsxv.get_routing_bgp_config(edge['id']) if bgp_config['bgp']['enabled']: bgp_edges.append({'name': edge['name'], 'edge_id': edge['id'], 'local_as': bgp_config['bgp']['localAS']}) if not bgp_edges: LOG.info("No BGP GW edges found") return headers = ['name', 'edge_id', 'local_as'] LOG.info(formatters.output_formatter(constants.EDGES, bgp_edges, headers)) @admin_utils.output_header def create_redis_rule(resource, event, trigger, **kwargs): usage = ("nsxadmin -r routing-redistribution-rule -o create " "--property gw-edge-ids=[,...] 
" "[--property prefix=] " "--property learn-from=ospf,bgp,connected,static " "--property action=") required_params = ('gw-edge-ids', 'learn-from', 'action') properties = admin_utils.parse_multi_keyval_opt(kwargs.get('property', [])) if not properties or not set(required_params) <= set(properties.keys()): LOG.error(usage) return prefix = properties.get('prefix') if prefix: prefix_name, cidr = prefix.split(':') prefixes = [get_ip_prefix(prefix_name, cidr)] if cidr else [] else: prefix_name = None prefixes = [] learn_from = properties['learn-from'].split(',') rule = get_redistribution_rule(prefix_name, 'bgp' in learn_from, 'ospf' in learn_from, 'static' in learn_from, 'connected' in learn_from, properties['action']) edge_ids = properties['gw-edge-ids'].split(',') for edge_id in edge_ids: try: bgp_config = nsxv.get_routing_bgp_config(edge_id) if not bgp_config['bgp'].get('enabled'): LOG.error("BGP is not enabled on edge %s", edge_id) return if not bgp_config['bgp']['redistribution']['enabled']: LOG.error("BGP redistribution is not enabled on edge %s", edge_id) return nsxv.add_bgp_redistribution_rules(edge_id, prefixes, [rule]) except exceptions.ResourceNotFound: LOG.error("Edge %s was not found", edge_id) return res = [{'edge_id': edge_id, 'prefix': prefix_name if prefix_name else 'ANY', 'learner-protocol': 'bgp', 'learn-from': ', '.join(set(learn_from)), 'action': properties['action']} for edge_id in edge_ids] headers = ['edge_id', 'prefix', 'learner-protocol', 'learn-from', 'action'] LOG.info(formatters.output_formatter( 'Routing redistribution rule', res, headers)) def delete_redis_rule(resource, event, trigger, **kwargs): usage = ("nsxadmin -r routing-redistribution-rule -o delete " "--property gw-edge-ids=[,...]" "[--property prefix-name=]") required_params = ('gw-edge-ids', ) properties = admin_utils.parse_multi_keyval_opt(kwargs.get('property', [])) if not properties or not set(required_params) <= set(properties.keys()): LOG.error(usage) return edge_ids = 
properties['gw-edge-ids'].split(',') # If no prefix-name is given then remove rules configured with default # prefix. prefixes = [properties.get('prefix-name')] for edge_id in edge_ids: try: nsxv.remove_bgp_redistribution_rules(edge_id, prefixes) except exceptions.ResourceNotFound: LOG.error("Edge %s was not found", edge_id) return @admin_utils.output_header def add_bgp_neighbour(resource, event, trigger, **kwargs): usage = ("nsxadmin -r bgp-neighbour -o create " "--property gw-edge-ids=[,...] " "--property ip-address= " "--property remote-as= " "--property password=") required_params = ('gw-edge-ids', 'ip-address', 'remote-as', 'password') properties = admin_utils.parse_multi_keyval_opt(kwargs.get('property', [])) if not properties or not set(required_params) <= set(properties.keys()): LOG.error(usage) return remote_as = properties['remote-as'] if not _validate_asn(remote_as): return nbr = nsxv_bgp.gw_bgp_neighbour(properties['ip-address'], properties['remote-as'], properties['password']) edge_ids = properties['gw-edge-ids'].split(',') for edge_id in edge_ids: try: nsxv.add_bgp_neighbours(edge_id, [nbr]) except exceptions.ResourceNotFound: LOG.error("Edge %s was not found", edge_id) return res = [{'edge_id': edge_id, 'ip_address': properties['ip-address'], 'remote_as': properties['remote-as'], 'hold_down_timer': cfg.CONF.nsxv.bgp_neighbour_hold_down_timer, 'keep_alive_timer': cfg.CONF.nsxv.bgp_neighbour_keep_alive_timer} for edge_id in edge_ids] headers = ['edge_id', 'ip_address', 'remote_as', 'hold_down_timer', 'keep_alive_timer'] LOG.info(formatters.output_formatter('New BPG neighbour', res, headers)) def remove_bgp_neighbour(resource, event, trigger, **kwargs): usage = ("nsxadmin -r bgp-neighbour -o delete " "--property gw-edge-ids=[,...] 
" "--property ip-address=") required_params = ('gw-edge-ids', 'ip-address') properties = admin_utils.parse_multi_keyval_opt(kwargs.get('property', [])) if not properties or not set(required_params) <= set(properties.keys()): LOG.error(usage) return nbr = nsxv_bgp.gw_bgp_neighbour(properties['ip-address'], '', '') edge_ids = properties['gw-edge-ids'].split(',') for edge_id in edge_ids: try: nsxv.remove_bgp_neighbours(edge_id, [nbr]) except exceptions.ResourceNotFound: LOG.error("Edge %s was not found", edge_id) return registry.subscribe(create_bgp_gw, constants.BGP_GW_EDGE, shell.Operations.CREATE.value) registry.subscribe(delete_bgp_gw, constants.BGP_GW_EDGE, shell.Operations.DELETE.value) registry.subscribe(list_bgp_edges, constants.BGP_GW_EDGE, shell.Operations.LIST.value) registry.subscribe(create_redis_rule, constants.ROUTING_REDIS_RULE, shell.Operations.CREATE.value) registry.subscribe(delete_redis_rule, constants.ROUTING_REDIS_RULE, shell.Operations.DELETE.value) registry.subscribe(add_bgp_neighbour, constants.BGP_NEIGHBOUR, shell.Operations.CREATE.value) registry.subscribe(remove_bgp_neighbour, constants.BGP_NEIGHBOUR, shell.Operations.DELETE.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/resources/networks.py0000666000175100017510000002432113244523345027246 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re import xml.etree.ElementTree as et from neutron_lib.callbacks import registry from neutron_lib import context from oslo_log import log as logging from oslo_serialization import jsonutils from vmware_nsx.db import db as nsx_db from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) nsxv = utils.get_nsxv_client() network_types = ['Network', 'VirtualWire', 'DistributedVirtualPortgroup'] PORTGROUP_PREFIX = 'dvportgroup' def get_networks_from_backend(): nsxv = utils.get_nsxv_client() so_list = nsxv.get_scoping_objects() return et.fromstring(so_list) def get_networks(): """Create an array of all the backend networks and their data """ root = get_networks_from_backend() networks = [] for obj in root.iter('object'): if obj.find('objectTypeName').text in network_types: networks.append({'type': obj.find('objectTypeName').text, 'moref': obj.find('objectId').text, 'name': obj.find('name').text}) return networks def get_networks_name_map(): """Create a dictionary mapping moref->backend name """ root = get_networks_from_backend() networks = {} for obj in root.iter('object'): if obj.find('objectTypeName').text in network_types: networks[obj.find('objectId').text] = obj.find('name').text return networks @admin_utils.output_header def neutron_list_networks(resource, event, trigger, **kwargs): LOG.info(formatters.output_formatter(constants.NETWORKS, get_networks(), ['type', 'moref', 'name'])) @admin_utils.output_header def nsx_update_switch(resource, event, trigger, **kwargs): nsxv = utils.get_nsxv_client() if not kwargs.get('property'): LOG.error("Need to specify dvs-id parameter and " "attribute to update. 
Add --property dvs-id= " "--property teamingpolicy=") return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) dvs_id = properties.get('dvs-id') if not dvs_id: LOG.error("Need to specify dvs-id. " "Add --property dvs-id=") return try: h, switch = nsxv.get_vdn_switch(dvs_id) except exceptions.ResourceNotFound: LOG.error("DVS %s not found", dvs_id) return supported_policies = ['ETHER_CHANNEL', 'LOADBALANCE_LOADBASED', 'LOADBALANCE_SRCID', 'LOADBALANCE_SRCMAC', 'FAILOVER_ORDER', 'LACP_ACTIVE', 'LACP_PASSIVE', 'LACP_V2'] policy = properties.get('teamingpolicy') if policy in supported_policies: if switch['teamingPolicy'] == policy: LOG.info("Policy already set!") return LOG.info("Updating NSXv switch %(dvs)s teaming policy to " "%(policy)s", {'dvs': dvs_id, 'policy': policy}) switch['teamingPolicy'] = policy try: switch = nsxv.update_vdn_switch(switch) except exceptions.VcnsApiException as e: desc = jsonutils.loads(e.response) details = desc.get('details') if details.startswith("No enum constant"): LOG.error("Unknown teaming policy %s", policy) else: LOG.error("Unexpected error occurred: %s", details) return LOG.info("Switch value after update: %s", switch) else: LOG.info("Current switch value is: %s", switch) LOG.error("Invalid teaming policy. 
" "Add --property teamingpolicy=") LOG.error("Possible values: %s", ', '.join(supported_policies)) @admin_utils.output_header def list_missing_networks(resource, event, trigger, **kwargs): """List the neutron networks which are missing the backend moref """ # get the neutron-nsx networks mapping from DB admin_context = context.get_admin_context() mappings = nsx_db.get_nsx_networks_mapping(admin_context.session) # get the list of backend networks: backend_networks = get_networks_name_map() missing_networks = [] # For each neutron network - check if there is a matching backend network for entry in mappings: nsx_id = entry['nsx_id'] dvs_id = entry['dvs_id'] if nsx_id not in backend_networks.keys(): missing_networks.append({'neutron_id': entry['neutron_id'], 'moref': nsx_id, 'dvs_id': dvs_id}) elif dvs_id: netname = backend_networks[nsx_id] if not netname.startswith(dvs_id): missing_networks.append({'neutron_id': entry['neutron_id'], 'moref': nsx_id, 'dvs_id': dvs_id}) LOG.info(formatters.output_formatter(constants.MISSING_NETWORKS, missing_networks, ['neutron_id', 'moref', 'dvs_id'])) @admin_utils.output_header def list_orphaned_networks(resource, event, trigger, **kwargs): """List the NSX networks which are missing the neutron DB """ admin_context = context.get_admin_context() missing_networks = [] # get the list of backend networks: backend_networks = get_networks() for net in backend_networks: moref = net['moref'] backend_name = net['name'] # Decide if this is a neutron network by its name (which should always # contain the net-id), and type if (backend_name.startswith('edge-') or len(backend_name) < 36 or net['type'] == 'Network'): # This is not a neutron network continue # get the list of neutron networks with this moref neutron_networks = nsx_db.get_nsx_network_mapping_for_nsx_id( admin_context.session, moref) if not neutron_networks: # no network found for this moref missing_networks.append(net) elif moref.startswith(PORTGROUP_PREFIX): # This is a VLAN network. 
Also verify that the DVS Id matches for entry in neutron_networks: if (not entry['dvs_id'] or backend_name.startswith(entry['dvs_id'])): found = True # this moref & dvs-id does not appear in the DB if not found: missing_networks.append(net) LOG.info(formatters.output_formatter(constants.ORPHANED_NETWORKS, missing_networks, ['type', 'moref', 'name'])) def get_dvs_id_from_backend_name(backend_name): reg = re.search(r"^dvs-\d*", backend_name) if reg: return reg.group(0) @admin_utils.output_header def delete_backend_network(resource, event, trigger, **kwargs): """Delete a backend network by its moref """ errmsg = ("Need to specify moref property. Add --property moref=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) moref = properties.get('moref') if not moref: LOG.error("%s", errmsg) return backend_name = get_networks_name_map().get(moref) if not backend_name: LOG.error("Failed to find the backend network %(moref)s", {'moref': moref}) return # Note: in case the backend network is attached to other backend objects, # like VM, the deleting may fail and through an exception nsxv = utils.get_nsxv_client() if moref.startswith(PORTGROUP_PREFIX): # get the dvs id from the backend name: dvs_id = get_dvs_id_from_backend_name(backend_name) if not dvs_id: LOG.error("Failed to find the DVS id of backend network " "%(moref)s", {'moref': moref}) else: try: nsxv.delete_port_group(dvs_id, moref) except Exception as e: LOG.error("Failed to delete backend network %(moref)s : " "%(e)s", {'moref': moref, 'e': e}) else: LOG.info("Backend network %(moref)s was deleted", {'moref': moref}) else: # Virtual wire try: nsxv.delete_virtual_wire(moref) except Exception as e: LOG.error("Failed to delete backend network %(moref)s : " "%(e)s", {'moref': moref, 'e': e}) else: LOG.info("Backend network %(moref)s was deleted", {'moref': moref}) registry.subscribe(neutron_list_networks, constants.NETWORKS, 
shell.Operations.LIST.value) registry.subscribe(nsx_update_switch, constants.NETWORKS, shell.Operations.NSX_UPDATE.value) registry.subscribe(list_missing_networks, constants.MISSING_NETWORKS, shell.Operations.LIST.value) registry.subscribe(list_orphaned_networks, constants.ORPHANED_NETWORKS, shell.Operations.LIST.value) registry.subscribe(delete_backend_network, constants.ORPHANED_NETWORKS, shell.Operations.NSX_CLEAN.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv/__init__.py0000666000175100017510000000000013244523345025103 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/__init__.py0000666000175100017510000000000013244523345024105 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv3/0000775000175100017510000000000013244524600023060 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv3/resources/0000775000175100017510000000000013244524600025072 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv3/resources/loadbalancer.py0000666000175100017510000000702313244523345030064 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils from vmware_nsxlib.v3 import nsx_constants as consts LOG = logging.getLogger(__name__) @admin_utils.list_handler(constants.LB_SERVICES) @admin_utils.output_header def nsx_list_lb_services(resource, event, trigger, **kwargs): """List LB services on NSX backend""" nsxlib = utils.get_connected_nsxlib() if not nsxlib.feature_supported(consts.FEATURE_LOAD_BALANCER): LOG.error("This utility is not available for NSX version %s", nsxlib.get_version()) return lb_services = nsxlib.load_balancer.service.list() LOG.info(formatters.output_formatter( constants.LB_SERVICES, [lb_services['results']], ['display_name', 'id', 'virtual_server_ids', 'attachment'])) return bool(lb_services) @admin_utils.list_handler(constants.LB_VIRTUAL_SERVERS) @admin_utils.output_header def nsx_list_lb_virtual_servers(resource, event, trigger, **kwargs): """List LB virtual servers on NSX backend""" nsxlib = utils.get_connected_nsxlib() if not nsxlib.feature_supported(consts.FEATURE_LOAD_BALANCER): LOG.error("This utility is not available for NSX version %s", nsxlib.get_version()) return lb_virtual_servers = nsxlib.load_balancer.virtual_server.list() LOG.info(formatters.output_formatter( constants.LB_VIRTUAL_SERVERS, [lb_virtual_servers['results']], ['display_name', 'id', 'ip_address', 'pool_id'])) return bool(lb_virtual_servers) @admin_utils.list_handler(constants.LB_POOLS) @admin_utils.output_header def nsx_list_lb_pools(resource, event, trigger, **kwargs): nsxlib = utils.get_connected_nsxlib() if not nsxlib.feature_supported(consts.FEATURE_LOAD_BALANCER): LOG.error("This utility is not available for NSX version %s", nsxlib.get_version()) return lb_pools = nsxlib.load_balancer.pool.list() 
LOG.info(formatters.output_formatter( constants.LB_POOLS, [lb_pools['results']], ['display_name', 'id', 'active_monitor_ids', 'members'])) return bool(lb_pools) @admin_utils.list_handler(constants.LB_MONITORS) @admin_utils.output_header def nsx_list_lb_monitors(resource, event, trigger, **kwargs): nsxlib = utils.get_connected_nsxlib() if not nsxlib.feature_supported(consts.FEATURE_LOAD_BALANCER): LOG.error("This utility is not available for NSX version %s", nsxlib.get_version()) return lb_monitors = nsxlib.load_balancer.monitor.list() LOG.info(formatters.output_formatter( constants.LB_MONITORS, [lb_monitors['results']], ['display_name', 'id', 'resource_type'])) return bool(lb_monitors) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv3/resources/utils.py0000666000175100017510000001655513244523413026623 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from neutron.db import db_base_plugin_v2 from neutron.db import l3_dvr_db # noqa from neutron import manager from neutron_lib import context from neutron_lib.plugins import constants as const from neutron_lib.plugins import directory from neutron_fwaas.services.firewall import fwaas_plugin as fwaas_plugin_v1 from neutron_fwaas.services.firewall import fwaas_plugin_v2 from vmware_nsx.common import config from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx_v3 import plugin from vmware_nsx.plugins.nsx_v3 import utils as v3_utils from vmware_nsx.services.fwaas.nsx_v3 import fwaas_callbacks_v1 from vmware_nsx.services.fwaas.nsx_v3 import fwaas_callbacks_v2 from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsxlib.v3 import nsx_constants _NSXLIB = None def get_nsxv3_client(nsx_username=None, nsx_password=None, use_basic_auth=False): return get_connected_nsxlib(nsx_username, nsx_password, use_basic_auth).client def get_connected_nsxlib(nsx_username=None, nsx_password=None, use_basic_auth=False): global _NSXLIB # for non-default agruments, initiate new lib if nsx_username or use_basic_auth: return v3_utils.get_nsxlib_wrapper(nsx_username, nsx_password, use_basic_auth) if _NSXLIB is None: _NSXLIB = v3_utils.get_nsxlib_wrapper() return _NSXLIB def get_plugin_filters(context): return admin_utils.get_plugin_filters( context, projectpluginmap.NsxPlugins.NSX_T) class NeutronDbClient(db_base_plugin_v2.NeutronDbPluginV2): def __init__(self): super(NeutronDbClient, self).__init__() self.context = context.get_admin_context() self.filters = get_plugin_filters(self.context) def _update_filters(self, requested_filters): filters = self.filters.copy() if requested_filters: filters.update(requested_filters) return filters def get_ports(self, filters=None, fields=None): filters = self._update_filters(filters) return super(NeutronDbClient, self).get_ports( 
self.context, filters=filters, fields=fields) def get_networks(self, filters=None, fields=None): filters = self._update_filters(filters) return super(NeutronDbClient, self).get_networks( self.context, filters=filters, fields=fields) def get_network(self, network_id): return super(NeutronDbClient, self).get_network( self.context, network_id) def get_subnet(self, subnet_id): return super(NeutronDbClient, self).get_subnet(self.context, subnet_id) def get_lswitch_and_lport_id(self, port_id): return nsx_db.get_nsx_switch_and_port_id(self.context.session, port_id) def lswitch_id_to_net_id(self, lswitch_id): net_ids = nsx_db.get_net_ids(self.context.session, lswitch_id) return net_ids[0] if net_ids else None def lrouter_id_to_router_id(self, lrouter_id): return nsx_db.get_neutron_from_nsx_router_id(self.context.session, lrouter_id) def net_id_to_lswitch_id(self, net_id): lswitch_ids = nsx_db.get_nsx_switch_ids(self.context.session, net_id) return lswitch_ids[0] if lswitch_ids else None def add_dhcp_service_binding(self, network_id, port_id, server_id): return nsx_db.add_neutron_nsx_service_binding( self.context.session, network_id, port_id, nsx_constants.SERVICE_DHCP, server_id) def add_dhcp_static_binding(self, port_id, subnet_id, ip_address, server_id, binding_id): return nsx_db.add_neutron_nsx_dhcp_binding( self.context.session, port_id, subnet_id, ip_address, server_id, binding_id) class NsxV3PluginWrapper(plugin.NsxV3Plugin): def __init__(self): # initialize the availability zones config.register_nsxv3_azs(cfg.CONF, cfg.CONF.nsx_v3.availability_zones) super(NsxV3PluginWrapper, self).__init__() self.context = context.get_admin_context() def __enter__(self): directory.add_plugin(const.CORE, self) return self def __exit__(self, exc_type, exc_value, traceback): directory.add_plugin(const.CORE, None) def _init_fwaas_plugin(self, provider, callbacks_class, plugin_callbacks): fwaas_plugin_class = manager.NeutronManager.load_class_for_provider( 'neutron.service_plugins', 
provider) fwaas_plugin = fwaas_plugin_class() self.fwaas_callbacks = callbacks_class() # override the fwplugin_rpc since there is no RPC support in adminutils self.fwaas_callbacks.fwplugin_rpc = plugin_callbacks(fwaas_plugin) self.init_is_complete = True def init_fwaas_for_admin_utils(self): # initialize the FWaaS plugin and callbacks self.fwaas_callbacks = None # This is an ugly patch to find out if it is v1 or v2 service_plugins = cfg.CONF.service_plugins for srv_plugin in service_plugins: if 'firewall' in srv_plugin: if 'v2' in srv_plugin: # FWaaS V2 self._init_fwaas_plugin( 'firewall_v2', fwaas_callbacks_v2.Nsxv3FwaasCallbacksV2, fwaas_plugin_v2.FirewallCallbacks) else: # FWaaS V1 self._init_fwaas_plugin( 'firewall', fwaas_callbacks_v1.Nsxv3FwaasCallbacksV1, fwaas_plugin_v1.FirewallCallbacks) return def _init_dhcp_metadata(self): pass def _process_security_group_logging(self): pass def _init_port_security_profile(self): return True def _extend_get_network_dict_provider(self, context, net): self._extend_network_dict_provider(context, net) # skip getting the Qos policy ID because get_object calls # plugin init again on admin-util environment def _extend_get_port_dict_binding(self, context, port): self._extend_port_dict_binding(context, port) # skip getting the Qos policy ID because get_object calls # plugin init again on admin-util environment def delete_network(self, network_id): return super(NsxV3PluginWrapper, self).delete_network( self.context, network_id) def remove_router_interface(self, router_id, interface): return super(NsxV3PluginWrapper, self).remove_router_interface( self.context, router_id, interface) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv3/resources/dhcp_binding.py0000666000175100017510000002173613244523345030074 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron_lib.callbacks import registry from neutron_lib import constants as const from neutron_lib import context as neutron_context from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import utils as nsx_utils from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils import vmware_nsx.shell.resources as shell from vmware_nsxlib.v3 import nsx_constants LOG = logging.getLogger(__name__) neutron_client = utils.NeutronDbClient() @admin_utils.output_header def list_dhcp_bindings(resource, event, trigger, **kwargs): """List DHCP bindings in Neutron.""" comp_ports = [port for port in neutron_client.get_ports() if nsx_utils.is_port_dhcp_configurable(port)] LOG.info(formatters.output_formatter(constants.DHCP_BINDING, comp_ports, ['id', 'mac_address', 'fixed_ips'])) @admin_utils.output_header def nsx_update_dhcp_bindings(resource, event, trigger, **kwargs): """Resync DHCP bindings for NSXv3 CrossHairs.""" nsxlib = utils.get_connected_nsxlib() nsx_version = nsxlib.get_version() if not nsx_utils.is_nsx_version_1_1_0(nsx_version): LOG.error("This utility is not available for NSX version %s", nsx_version) return dhcp_profile_uuid = None # TODO(asarfaty) Add availability zones support here if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) 
dhcp_profile_uuid = properties.get('dhcp_profile_uuid') if not dhcp_profile_uuid: LOG.error("dhcp_profile_uuid is not defined") return cfg.CONF.set_override('dhcp_agent_notification', False) cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') cfg.CONF.set_override('dhcp_profile', dhcp_profile_uuid, 'nsx_v3') port_bindings = {} # lswitch_id: [(port_id, mac, ip), ...] server_bindings = {} # lswitch_id: dhcp_server_id ports = neutron_client.get_ports() for port in ports: device_owner = port['device_owner'] if (device_owner != const.DEVICE_OWNER_DHCP and not nsx_utils.is_port_dhcp_configurable(port)): continue for fixed_ip in port['fixed_ips']: if netaddr.IPNetwork(fixed_ip['ip_address']).version == 6: continue network_id = port['network_id'] subnet = neutron_client.get_subnet(fixed_ip['subnet_id']) if device_owner == const.DEVICE_OWNER_DHCP: # For each DHCP-enabled network, create a logical DHCP server # and update the attachment type to DHCP on the corresponding # logical port of the Neutron DHCP port. network = neutron_client.get_network(port['network_id']) net_tags = nsxlib.build_v3_tags_payload( network, resource_type='os-neutron-net-id', project_name='admin') # TODO(asarfaty): add default_dns_nameservers & dns_domain # from availability zone server_data = nsxlib.native_dhcp.build_server_config( network, subnet, port, net_tags) server_data['dhcp_profile_id'] = dhcp_profile_uuid dhcp_server = nsxlib.dhcp_server.create(**server_data) LOG.info("Created logical DHCP server %(server)s for " "network %(network)s", {'server': dhcp_server['id'], 'network': port['network_id']}) # Add DHCP service binding in neutron DB. neutron_client.add_dhcp_service_binding( network['id'], port['id'], dhcp_server['id']) # Update logical port for DHCP purpose. 
lswitch_id, lport_id = ( neutron_client.get_lswitch_and_lport_id(port['id'])) nsxlib.logical_port.update( lport_id, dhcp_server['id'], attachment_type=nsx_constants.ATTACHMENT_DHCP) server_bindings[lswitch_id] = dhcp_server['id'] LOG.info("Updated DHCP logical port %(port)s for " "network %(network)s", {'port': lport_id, 'network': port['network_id']}) elif subnet['enable_dhcp']: # Store (mac, ip) binding of each compute port in a # DHCP-enabled subnet. lswitch_id = neutron_client.net_id_to_lswitch_id(network_id) bindings = port_bindings.get(lswitch_id, []) bindings.append((port['id'], port['mac_address'], fixed_ip['ip_address'], fixed_ip['subnet_id'])) port_bindings[lswitch_id] = bindings break # process only the first IPv4 address # Populate mac/IP bindings in each logical DHCP server. for lswitch_id, bindings in port_bindings.items(): dhcp_server_id = server_bindings.get(lswitch_id) if not dhcp_server_id: continue for (port_id, mac, ip, subnet_id) in bindings: hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip}]}} subnet = neutron_client.get_subnet(subnet_id) binding = nsxlib.dhcp_server.create_binding( dhcp_server_id, mac, ip, hostname, cfg.CONF.nsx_v3.dhcp_lease_time, options, subnet.get('gateway_ip')) # Add DHCP static binding in neutron DB. neutron_client.add_dhcp_static_binding( port_id, subnet_id, ip, dhcp_server_id, binding['id']) LOG.info("Added DHCP binding (mac: %(mac)s, ip: %(ip)s) " "for neutron port %(port)s", {'mac': mac, 'ip': ip, 'port': port_id}) @admin_utils.output_header def nsx_recreate_dhcp_server(resource, event, trigger, **kwargs): """Recreate DHCP server & binding for a neutron network""" if not cfg.CONF.nsx_v3.native_dhcp_metadata: LOG.error("Native DHCP is disabled.") return errmsg = ("Need to specify net-id property. 
Add --property net-id=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) net_id = properties.get('net-id') if not net_id: LOG.error("%s", errmsg) return context = neutron_context.get_admin_context() with utils.NsxV3PluginWrapper() as plugin: # verify that this is an existing network with dhcp enabled try: network = plugin._get_network(context, net_id) except exceptions.NetworkNotFound: LOG.error("Network %s was not found", net_id) return if plugin._has_no_dhcp_enabled_subnet(context, network): LOG.error("Network %s has no DHCP enabled subnet", net_id) return dhcp_relay = plugin.get_network_az_by_net_id( context, net_id).dhcp_relay_service if dhcp_relay: LOG.error("Native DHCP should not be enabled with dhcp relay") return # find the dhcp subnet of this network subnet_id = None for subnet in network.subnets: if subnet.enable_dhcp: subnet_id = subnet.id break if not subnet_id: LOG.error("Network %s has no DHCP enabled subnet", net_id) return dhcp_subnet = plugin.get_subnet(context, subnet_id) # disable and re-enable the dhcp plugin._enable_native_dhcp(context, network, dhcp_subnet) LOG.info("Done.") registry.subscribe(list_dhcp_bindings, constants.DHCP_BINDING, shell.Operations.LIST.value) registry.subscribe(nsx_update_dhcp_bindings, constants.DHCP_BINDING, shell.Operations.NSX_UPDATE.value) registry.subscribe(nsx_recreate_dhcp_server, constants.DHCP_BINDING, shell.Operations.NSX_RECREATE.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv3/resources/ports.py0000666000175100017510000003653413244523413026631 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from sqlalchemy.orm import exc from vmware_nsx.common import utils as nsx_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsx_models from vmware_nsx.dvs import dvs from vmware_nsx.plugins.nsx_v3 import plugin from vmware_nsx.services.qos.common import utils as qos_utils from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils as v3_utils from vmware_nsx.shell import resources as shell from vmware_nsxlib.v3 import exceptions as nsx_exc from vmware_nsxlib.v3 import nsx_constants as nsxlib_consts from vmware_nsxlib.v3 import resources from vmware_nsxlib.v3 import security from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import db_base_plugin_v2 from neutron.db import l3_db from neutron.db import portsecurity_db from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.callbacks import registry from neutron_lib import constants as const from neutron_lib import context as neutron_context from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory LOG = logging.getLogger(__name__) class PortsPlugin(db_base_plugin_v2.NeutronDbPluginV2, portsecurity_db.PortSecurityDbMixin, addr_pair_db.AllowedAddressPairsMixin): def __enter__(self): directory.add_plugin(plugin_constants.CORE, self) 
return self def __exit__(self, exc_type, exc_value, traceback): directory.add_plugin(plugin_constants.CORE, None) def get_port_nsx_id(session, neutron_id): # get the nsx port id from the DB mapping try: mapping = (session.query(nsx_models.NeutronNsxPortMapping). filter_by(neutron_id=neutron_id). one()) return mapping['nsx_port_id'] except exc.NoResultFound: pass def get_network_nsx_id(session, neutron_id): # get the nsx switch id from the DB mapping mappings = nsx_db.get_nsx_switch_ids(session, neutron_id) if not mappings or len(mappings) == 0: LOG.debug("Unable to find NSX mappings for neutron " "network %s.", neutron_id) # fallback in case we didn't find the id in the db mapping # This should not happen, but added here in case the network was # created before this code was added. return neutron_id else: return mappings[0] def get_port_and_profile_clients(): _nsx_client = v3_utils.get_nsxv3_client() return (resources.LogicalPort(_nsx_client), resources.SwitchingProfile(_nsx_client)) def get_dhcp_profile_id(profile_client): profiles = profile_client.find_by_display_name( plugin.NSX_V3_DHCP_PROFILE_NAME) if profiles and len(profiles) == 1: return profiles[0]['id'] LOG.warning("Could not find DHCP profile on backend") def get_spoofguard_profile_id(profile_client): profiles = profile_client.find_by_display_name( plugin.NSX_V3_PSEC_PROFILE_NAME) if profiles and len(profiles) == 1: return profiles[0]['id'] LOG.warning("Could not find Spoof Guard profile on backend") def add_profile_mismatch(problems, neutron_id, nsx_id, prf_id, title): msg = ('Wrong %(title)s profile %(prf_id)s') % {'title': title, 'prf_id': prf_id} problems.append({'neutron_id': neutron_id, 'nsx_id': nsx_id, 'error': msg}) @admin_utils.output_header def list_missing_ports(resource, event, trigger, **kwargs): """List neutron ports that are missing the NSX backend port And ports with wrong switch profiles """ admin_cxt = neutron_context.get_admin_context() filters = v3_utils.get_plugin_filters(admin_cxt) 
with PortsPlugin() as plugin: neutron_ports = plugin.get_ports(admin_cxt, filters=filters) port_client, profile_client = get_port_and_profile_clients() # get pre-defined profile ids dhcp_profile_id = get_dhcp_profile_id(profile_client) dhcp_profile_key = resources.SwitchingProfileTypes.SWITCH_SECURITY spoofguard_profile_id = get_spoofguard_profile_id(profile_client) spoofguard_profile_key = resources.SwitchingProfileTypes.SPOOF_GUARD qos_profile_key = resources.SwitchingProfileTypes.QOS problems = [] for port in neutron_ports: neutron_id = port['id'] # get the network nsx id from the mapping table nsx_id = get_port_nsx_id(admin_cxt.session, neutron_id) if not nsx_id: # skip external ports pass else: try: nsx_port = port_client.get(nsx_id) except nsx_exc.ResourceNotFound: problems.append({'neutron_id': neutron_id, 'nsx_id': nsx_id, 'error': 'Missing from backend'}) continue # Port found on backend! # Check that it has all the expected switch profiles. # create a dictionary of the current profiles: profiles_dict = {} for prf in nsx_port['switching_profile_ids']: profiles_dict[prf['key']] = prf['value'] # DHCP port: neutron dhcp profile should be attached # to logical ports created for neutron DHCP but not # for native DHCP. 
if (port.get('device_owner') == const.DEVICE_OWNER_DHCP and not cfg.CONF.nsx_v3.native_dhcp_metadata): prf_id = profiles_dict[dhcp_profile_key] if prf_id != dhcp_profile_id: add_profile_mismatch(problems, neutron_id, nsx_id, prf_id, "DHCP security") # Port with QoS policy: a matching profile should be attached qos_policy_id = qos_utils.get_port_policy_id(admin_cxt, neutron_id) if qos_policy_id: qos_profile_id = nsx_db.get_switch_profile_by_qos_policy( admin_cxt.session, qos_policy_id) prf_id = profiles_dict[qos_profile_key] if prf_id != qos_profile_id: add_profile_mismatch(problems, neutron_id, nsx_id, prf_id, "QoS") # Port with security & fixed ips/address pairs: # neutron spoofguard profile should be attached port_sec, has_ip = plugin._determine_port_security_and_has_ip( admin_cxt, port) addr_pair = port.get(addr_apidef.ADDRESS_PAIRS) if port_sec and (has_ip or addr_pair): prf_id = profiles_dict[spoofguard_profile_key] if prf_id != spoofguard_profile_id: add_profile_mismatch(problems, neutron_id, nsx_id, prf_id, "Spoof Guard") if len(problems) > 0: title = ("Found internal ports misconfiguration on the " "NSX manager:") LOG.info(formatters.output_formatter( title, problems, ['neutron_id', 'nsx_id', 'error'])) else: LOG.info("All internal ports verified on the NSX manager") def get_vm_network_device(vm_mng, vm_moref, mac_address): """Return the network device with MAC 'mac_address'. 
This code was inspired by Nova vif.get_network_device """ hardware_devices = vm_mng.get_vm_interfaces_info(vm_moref) if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": hardware_devices = hardware_devices.VirtualDevice for device in hardware_devices: if hasattr(device, 'macAddress'): if device.macAddress == mac_address: return device def migrate_compute_ports_vms(resource, event, trigger, **kwargs): """Update the VMs ports on the backend after migrating nsx-v -> nsx-v3 After using api_replay to migrate the neutron data from NSX-V to NSX-T we need to update the VM ports to use OpaqueNetwork instead of DistributedVirtualPortgroup """ # Connect to the DVS manager, using the configuration parameters try: vm_mng = dvs.VMManager() except Exception as e: LOG.error("Cannot connect to the DVS: Please update the [dvs] " "section in the nsx.ini file: %s", e) return # Go over all the compute ports from the plugin admin_cxt = neutron_context.get_admin_context() port_filters = v3_utils.get_plugin_filters(admin_cxt) port_filters['device_owner'] = ['compute:None'] with PortsPlugin() as plugin: neutron_ports = plugin.get_ports(admin_cxt, filters=port_filters) for port in neutron_ports: device_id = port.get('device_id') # get the vm moref & spec from the DVS vm_moref = vm_mng.get_vm_moref_obj(device_id) vm_spec = vm_mng.get_vm_spec(vm_moref) if not vm_spec: LOG.error("Failed to get the spec of vm %s", device_id) continue # Go over the VM interfaces and check if it should be updated update_spec = False for prop in vm_spec.propSet: if (prop.name == 'network' and hasattr(prop.val, 'ManagedObjectReference')): for net in prop.val.ManagedObjectReference: if net._type == 'DistributedVirtualPortgroup': update_spec = True if not update_spec: LOG.info("No need to update the spec of vm %s", device_id) continue # find the old interface by it's mac and delete it device = get_vm_network_device(vm_mng, vm_moref, port['mac_address']) if device is None: LOG.warning("No device with MAC 
address %s exists on the VM", port['mac_address']) continue device_type = device.__class__.__name__ LOG.info("Detaching old interface from VM %s", device_id) vm_mng.detach_vm_interface(vm_moref, device) # add the new interface as OpaqueNetwork LOG.info("Attaching new interface to VM %s", device_id) nsx_net_id = get_network_nsx_id(admin_cxt.session, port['network_id']) vm_mng.attach_vm_interface(vm_moref, port['id'], port['mac_address'], nsx_net_id, device_type) def migrate_exclude_ports(resource, event, trigger, **kwargs): _nsx_client = v3_utils.get_nsxv3_client() nsxlib = v3_utils.get_connected_nsxlib() version = nsxlib.get_version() if not nsx_utils.is_nsx_version_2_0_0(version): LOG.info("Migration only supported from 2.0 onwards") LOG.info("Version is %s", version) return admin_cxt = neutron_context.get_admin_context() plugin = PortsPlugin() _port_client = resources.LogicalPort(_nsx_client) exclude_list = nsxlib.firewall_section.get_excludelist() for member in exclude_list['members']: if member['target_type'] == 'LogicalPort': port_id = member['target_id'] # Get port try: nsx_port = _port_client.get(port_id) except nsx_exc.ResourceNotFound: LOG.info("Port %s not found", port_id) continue # Validate its a neutron port is_neutron_port = False for tag in nsx_port['tags']: if tag['scope'] == 'os-neutron-port-id': is_neutron_port = True neutron_port_id = tag['tag'] break if not is_neutron_port: LOG.info("Port %s is not a neutron port", port_id) continue # Check if this port exists in the DB try: plugin.get_port(admin_cxt, neutron_port_id) except Exception: LOG.info("Port %s is not defined in DB", neutron_port_id) continue # Update tag for the port tags_update = [{'scope': security.PORT_SG_SCOPE, 'tag': nsxlib_consts.EXCLUDE_PORT}] _port_client.update(port_id, None, tags_update=tags_update) # Remove port from the exclude list nsxlib.firewall_section.remove_member_from_fw_exclude_list( port_id, nsxlib_consts.TARGET_TYPE_LOGICAL_PORT) LOG.info("Port %s successfully 
updated", port_id) def tag_default_ports(resource, event, trigger, **kwargs): nsxlib = v3_utils.get_connected_nsxlib() admin_cxt = neutron_context.get_admin_context() filters = v3_utils.get_plugin_filters(admin_cxt) # the plugin creation below will create the NS group and update the default # OS section to have the correct applied to group with v3_utils.NsxV3PluginWrapper() as _plugin: neutron_ports = _plugin.get_ports(admin_cxt, filters=filters) for port in neutron_ports: neutron_id = port['id'] # get the network nsx id from the mapping table nsx_id = get_port_nsx_id(admin_cxt.session, neutron_id) if not nsx_id: continue device_owner = port['device_owner'] if (device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF or device_owner == const.DEVICE_OWNER_DHCP): continue ps = _plugin._get_port_security_binding(admin_cxt, neutron_id) if not ps: continue try: nsx_port = nsxlib.logical_port.get(nsx_id) except nsx_exc.ResourceNotFound: continue tags_update = nsx_port['tags'] tags_update += [{'scope': security.PORT_SG_SCOPE, 'tag': plugin.NSX_V3_DEFAULT_SECTION}] nsxlib.logical_port.update(nsx_id, None, tags_update=tags_update) registry.subscribe(list_missing_ports, constants.PORTS, shell.Operations.LIST_MISMATCHES.value) registry.subscribe(migrate_compute_ports_vms, constants.PORTS, shell.Operations.NSX_MIGRATE_V_V3.value) registry.subscribe(migrate_exclude_ports, constants.PORTS, shell.Operations.NSX_MIGRATE_EXCLUDE_PORTS.value) registry.subscribe(tag_default_ports, constants.PORTS, shell.Operations.NSX_TAG_DEFAULT.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv3/resources/config.py0000666000175100017510000000265413244523345026727 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.callbacks import registry from oslo_log import log as logging from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) @admin_utils.output_header def validate_configuration(resource, event, trigger, **kwargs): """Validate the nsxv3 configuration""" try: utils.NsxV3PluginWrapper() except Exception as e: LOG.error("Configuration validation failed: %s", e) else: LOG.info("Configuration validation succeeded") registry.subscribe(validate_configuration, constants.CONFIG, shell.Operations.VALIDATE.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv3/resources/http_service.py0000666000175100017510000000526213244523345030157 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.callbacks import registry from oslo_log import log as logging from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils import vmware_nsx.shell.resources as shell from vmware_nsxlib.v3 import nsx_constants LOG = logging.getLogger(__name__) neutron_client = utils.NeutronDbClient() @admin_utils.output_header def nsx_rate_limit_show(resource, event, trigger, **kwargs): """Show the current NSX rate limit.""" nsxlib = utils.get_connected_nsxlib() if not nsxlib.feature_supported(nsx_constants.FEATURE_RATE_LIMIT): LOG.error("This utility is not available for NSX version %s", nsxlib.get_version()) return rate_limit = nsxlib.http_services.get_rate_limit() LOG.info("Current NSX rate limit is %s", rate_limit) @admin_utils.output_header def nsx_rate_limit_update(resource, event, trigger, **kwargs): """Set the NSX rate limit The default value is 40. 0 means no limit """ nsxlib = utils.get_connected_nsxlib() if not nsxlib.feature_supported(nsx_constants.FEATURE_RATE_LIMIT): LOG.error("This utility is not available for NSX version %s", nsxlib.get_version()) return rate_limit = None if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) rate_limit = properties.get('value', None) if rate_limit is None: usage = ("nsxadmin -r rate-limit -o nsx-update " "--property value=") LOG.error("Missing parameters. 
Usage: %s", usage) return nsxlib.http_services.update_rate_limit(rate_limit) LOG.info("NSX rate limit was updated to %s", rate_limit) registry.subscribe(nsx_rate_limit_show, constants.RATE_LIMIT, shell.Operations.SHOW.value) registry.subscribe(nsx_rate_limit_update, constants.RATE_LIMIT, shell.Operations.NSX_UPDATE.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv3/resources/__init__.py0000666000175100017510000000000013244523345027200 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv3/resources/routers.py0000666000175100017510000002267313244523345027170 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE(review): `import sys` was removed together with the Python-2-only
# sys.exc_clear() calls below; sys is otherwise unused in this module.
from vmware_nsx.common import utils as nsx_utils
from vmware_nsx.db import db as nsx_db
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import formatters
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils
from vmware_nsx.shell import resources as shell
from vmware_nsxlib.v3 import exceptions as nsx_exc
from vmware_nsxlib.v3 import nsx_constants

from neutron.db import db_base_plugin_v2
from neutron.db import l3_db
from neutron_lib.callbacks import registry
from neutron_lib import context as neutron_context
from oslo_log import log as logging

LOG = logging.getLogger(__name__)
neutron_client = utils.NeutronDbClient()


class RoutersPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                    l3_db.L3_NAT_db_mixin):
    # Minimal plugin composition giving the admin utilities access to the
    # neutron routers DB API without loading the full NSX plugin.
    pass


@admin_utils.output_header
def list_missing_routers(resource, event, trigger, **kwargs):
    """List neutron routers that are missing the NSX backend router """
    nsxlib = utils.get_connected_nsxlib()
    plugin = RoutersPlugin()
    admin_cxt = neutron_context.get_admin_context()
    filters = utils.get_plugin_filters(admin_cxt)
    neutron_routers = plugin.get_routers(admin_cxt, filters=filters)
    routers = []
    for router in neutron_routers:
        neutron_id = router['id']
        # get the router nsx id from the mapping table
        nsx_id = nsx_db.get_nsx_router_id(admin_cxt.session, neutron_id)
        if not nsx_id:
            routers.append({'name': router['name'],
                            'neutron_id': neutron_id,
                            'nsx_id': None})
        else:
            try:
                nsxlib.logical_router.get(nsx_id)
            except nsx_exc.ResourceNotFound:
                routers.append({'name': router['name'],
                                'neutron_id': neutron_id,
                                'nsx_id': nsx_id})
    if len(routers) > 0:
        title = ("Found %d routers missing from the NSX "
                 "manager:") % len(routers)
        LOG.info(formatters.output_formatter(
            title, routers, ['name', 'neutron_id', 'nsx_id']))
    else:
        LOG.info("All routers exist on the NSX manager")


@admin_utils.output_header
def update_nat_rules(resource, event, trigger, **kwargs):
    """Update all routers NAT rules to not bypass the firewall"""
    # This feature is supported only since nsx version 2
    nsxlib = utils.get_connected_nsxlib()
    version = nsxlib.get_version()
    if not nsx_utils.is_nsx_version_2_0_0(version):
        LOG.info("NAT rules update only supported from 2.0 onwards")
        LOG.info("Version is %s", version)
        return

    # Go over all neutron routers
    plugin = RoutersPlugin()
    admin_cxt = neutron_context.get_admin_context()
    filters = utils.get_plugin_filters(admin_cxt)
    neutron_routers = plugin.get_routers(admin_cxt, filters=filters)
    num_of_updates = 0
    for router in neutron_routers:
        neutron_id = router['id']
        # get the router nsx id from the mapping table
        nsx_id = nsx_db.get_nsx_router_id(admin_cxt.session, neutron_id)
        if nsx_id:
            # get all NAT rules:
            rules = nsxlib.logical_router.list_nat_rules(nsx_id)['results']
            for rule in rules:
                # Force nat_pass=False so NAT-ed traffic still traverses
                # the firewall rules (missing attribute means "pass").
                if 'nat_pass' not in rule or rule['nat_pass']:
                    nsxlib.logical_router.update_nat_rule(
                        nsx_id, rule['id'], nat_pass=False)
                    num_of_updates = num_of_updates + 1
    if num_of_updates:
        LOG.info("Done updating %s NAT rules", num_of_updates)
    else:
        LOG.info("Did not find any NAT rule to update")


@admin_utils.output_header
def list_orphaned_routers(resource, event, trigger, **kwargs):
    """List NSX backend routers that have no matching neutron router."""
    nsxlib = utils.get_connected_nsxlib()
    nsx_routers = nsxlib.logical_router.list()['results']
    missing_routers = []
    for nsx_router in nsx_routers:
        # check if it exists in the neutron DB
        if not neutron_client.lrouter_id_to_router_id(nsx_router['id']):
            # Skip non-neutron routers, by tags
            for tag in nsx_router.get('tags', []):
                if tag.get('scope') == 'os-neutron-router-id':
                    missing_routers.append(nsx_router)
                    break

    LOG.info(formatters.output_formatter(constants.ORPHANED_ROUTERS,
                                         missing_routers,
                                         ['id', 'display_name']))


@admin_utils.output_header
def delete_backend_router(resource, event, trigger, **kwargs):
    """Delete a backend router (given by --property nsx-id) and its ports."""
    nsxlib = utils.get_connected_nsxlib()
    errmsg = ("Need to specify nsx-id property. Add --property nsx-id=")
    if not kwargs.get('property'):
        LOG.error("%s", errmsg)
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    nsx_id = properties.get('nsx-id')
    if not nsx_id:
        LOG.error("%s", errmsg)
        return

    # check if the router exists
    try:
        nsxlib.logical_router.get(nsx_id, silent=True)
    except nsx_exc.ResourceNotFound:
        # NOTE: a sys.exc_clear() call used to be here; it was removed in
        # Python 3 and raised AttributeError. silent=True already prevents
        # the client from logging the 404.
        LOG.warning("Backend router %s was not found.", nsx_id)
        return

    # try to delete it
    try:
        # first delete its ports
        ports = nsxlib.logical_router_port.get_by_router_id(nsx_id)
        for port in ports:
            nsxlib.logical_router_port.delete(port['id'])
        nsxlib.logical_router.delete(nsx_id)
    except Exception as e:
        LOG.error("Failed to delete backend router %(id)s : %(e)s.", {
            'id': nsx_id, 'e': e})
        return

    # Verify that the router was deleted since the backend does not always
    # throws errors
    try:
        nsxlib.logical_router.get(nsx_id, silent=True)
    except nsx_exc.ResourceNotFound:
        # see note above: sys.exc_clear() removed (Python 3 compatibility)
        LOG.info("Backend router %s was deleted.", nsx_id)
    else:
        LOG.error("Failed to delete backend router %s.", nsx_id)


@admin_utils.output_header
def update_dhcp_relay(resource, event, trigger, **kwargs):
    """Update all routers dhcp relay service by the current configuration"""
    nsxlib = utils.get_connected_nsxlib()
    if not nsxlib.feature_supported(nsx_constants.FEATURE_DHCP_RELAY):
        version = nsxlib.get_version()
        LOG.error("DHCP relay is not supported by NSX version %s", version)
        return

    admin_cxt = neutron_context.get_admin_context()
    filters = utils.get_plugin_filters(admin_cxt)
    with utils.NsxV3PluginWrapper() as plugin:
        # Make sure FWaaS was initialized
        plugin.init_fwaas_for_admin_utils()

        # get all neutron routers and interfaces ports
        routers = plugin.get_routers(admin_cxt, filters=filters)
        for router in routers:
            LOG.info("Updating router %s", router['id'])
            port_filters = {'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF],
                            'device_id': [router['id']]}
            ports = plugin.get_ports(admin_cxt, filters=port_filters)
            for port in ports:
                # get the backend router port by the tag
                nsx_port_id = nsxlib.get_id_by_resource_and_tag(
                    'LogicalRouterDownLinkPort',
                    'os-neutron-rport-id', port['id'])
                if not nsx_port_id:
                    LOG.warning("Couldn't find nsx router port for interface "
                                "%s", port['id'])
                    continue
                # get the network of this port
                network_id = port['network_id']
                # check the relay service on the az of the network
                az = plugin.get_network_az_by_net_id(admin_cxt, network_id)
                nsxlib.logical_router_port.update(
                    nsx_port_id, relay_service_uuid=az.dhcp_relay_service)

            # if FWaaS is enables, also update the firewall rules
            # (best effort: log instead of silently swallowing failures)
            try:
                plugin.update_router_firewall(admin_cxt, router['id'])
            except Exception as e:
                LOG.warning("Failed to update firewall rules of router "
                            "%(id)s: %(e)s", {'id': router['id'], 'e': e})

    LOG.info("Done.")


registry.subscribe(list_missing_routers,
                   constants.ROUTERS,
                   shell.Operations.LIST_MISMATCHES.value)
registry.subscribe(update_nat_rules,
                   constants.ROUTERS,
                   shell.Operations.NSX_UPDATE_RULES.value)
registry.subscribe(list_orphaned_routers,
                   constants.ORPHANED_ROUTERS,
                   shell.Operations.LIST.value)
registry.subscribe(delete_backend_router,
                   constants.ORPHANED_ROUTERS,
                   shell.Operations.NSX_CLEAN.value)
registry.subscribe(update_dhcp_relay,
                   constants.ROUTERS,
                   shell.Operations.NSX_UPDATE_DHCP_RELAY.value)
# Copyright 2016 VMware, Inc.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.callbacks import registry
from neutron_lib import context
from oslo_config import cfg
from oslo_log import log as logging

from vmware_nsx.common import utils as nsx_utils
from vmware_nsx.db import db as nsx_db
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import formatters
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils
import vmware_nsx.shell.resources as shell
from vmware_nsxlib.v3 import nsx_constants

LOG = logging.getLogger(__name__)
neutron_client = utils.NeutronDbClient()


def _get_dhcp_profile_uuid(**kwargs):
    """Resolve the DHCP profile UUID.

    Precedence: the --property dhcp_profile_uuid CLI option, then the
    nsx_v3.dhcp_profile configuration option (resolved by name or id).
    Returns None when neither is available.
    """
    if kwargs.get('property'):
        properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
        dhcp_profile_uuid = properties.get('dhcp_profile_uuid')
        if dhcp_profile_uuid:
            return dhcp_profile_uuid
    nsxlib = utils.get_connected_nsxlib()
    if cfg.CONF.nsx_v3.dhcp_profile:
        return nsxlib.native_dhcp_profile.get_id_by_name_or_id(
            cfg.CONF.nsx_v3.dhcp_profile)


def _get_orphaned_dhcp_servers(dhcp_profile_uuid):
    # An orphaned DHCP server means the associated neutron network
    # does not exist or has no DHCP-enabled subnet.
    orphaned_servers = []
    server_net_pairs = []

    # Find matching DHCP servers for a given dhcp_profile_uuid.
    nsxlib = utils.get_connected_nsxlib()
    response = nsxlib.dhcp_server.list()
    for dhcp_server in response['results']:
        if dhcp_server['dhcp_profile_id'] != dhcp_profile_uuid:
            continue
        found = False
        for tag in dhcp_server['tags']:
            if tag['scope'] == 'os-neutron-net-id':
                server_net_pairs.append((dhcp_server, tag['tag']))
                found = True
                break
        if not found:
            # The associated neutron network is not defined.
            dhcp_server['neutron_net_id'] = None
            orphaned_servers.append(dhcp_server)

    # Check if there is DHCP-enabled subnet in each network.
    for dhcp_server, net_id in server_net_pairs:
        try:
            network = neutron_client.get_network(net_id)
        except Exception:
            # The associated neutron network is not found in DB.
            dhcp_server['neutron_net_id'] = None
            orphaned_servers.append(dhcp_server)
            continue
        dhcp_enabled = False
        for subnet_id in network['subnets']:
            subnet = neutron_client.get_subnet(subnet_id)
            if subnet['enable_dhcp']:
                dhcp_enabled = True
                break
        if not dhcp_enabled:
            dhcp_server['neutron_net_id'] = net_id
            orphaned_servers.append(dhcp_server)

    return orphaned_servers


@admin_utils.output_header
def nsx_list_orphaned_dhcp_servers(resource, event, trigger, **kwargs):
    """List logical DHCP servers without associated DHCP-enabled subnet."""
    nsxlib = utils.get_connected_nsxlib()
    nsx_version = nsxlib.get_version()
    if not nsx_utils.is_nsx_version_1_1_0(nsx_version):
        LOG.error("This utility is not available for NSX version %s",
                  nsx_version)
        return

    dhcp_profile_uuid = _get_dhcp_profile_uuid(**kwargs)
    if not dhcp_profile_uuid:
        LOG.error("dhcp_profile_uuid is not defined")
        return

    orphaned_servers = _get_orphaned_dhcp_servers(dhcp_profile_uuid)
    LOG.info(formatters.output_formatter(constants.ORPHANED_DHCP_SERVERS,
                                         orphaned_servers,
                                         ['id', 'neutron_net_id']))


@admin_utils.output_header
def nsx_clean_orphaned_dhcp_servers(resource, event, trigger, **kwargs):
    """Remove logical DHCP servers without associated DHCP-enabled subnet."""
    # For each orphaned DHCP server,
    # (1) delete the attached logical DHCP port,
    # (2) delete the logical DHCP server,
    # (3) clean corresponding neutron DB entry.
    nsxlib = utils.get_connected_nsxlib()
    nsx_version = nsxlib.get_version()
    if not nsx_utils.is_nsx_version_1_1_0(nsx_version):
        LOG.error("This utility is not available for NSX version %s",
                  nsx_version)
        return

    dhcp_profile_uuid = _get_dhcp_profile_uuid(**kwargs)
    if not dhcp_profile_uuid:
        LOG.error("dhcp_profile_uuid is not defined")
        return

    cfg.CONF.set_override('dhcp_agent_notification', False)
    cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3')
    cfg.CONF.set_override('dhcp_profile', dhcp_profile_uuid, 'nsx_v3')

    orphaned_servers = _get_orphaned_dhcp_servers(dhcp_profile_uuid)

    for server in orphaned_servers:
        try:
            # TODO(asarfaty): should add this as api to nsxlib instead of
            # abusing it
            # NOTE: renamed from "resource" to avoid shadowing the callback
            # parameter of the same name.
            attachment_query = (
                '?attachment_type=DHCP_SERVICE&attachment_id=%s' %
                server['id'])
            response = nsxlib.logical_port.get(attachment_query)
            if response and response['result_count'] > 0:
                nsxlib.logical_port.delete(response['results'][0]['id'])
            nsxlib.dhcp_server.delete(server['id'])
            net_id = server.get('neutron_net_id')
            if net_id:
                # Delete neutron_net_id -> dhcp_service_id mapping from the DB.
                nsx_db.delete_neutron_nsx_service_binding(
                    context.get_admin_context().session, net_id,
                    nsx_constants.SERVICE_DHCP)
            LOG.info("Removed orphaned DHCP server %s", server['id'])
        except Exception as e:
            LOG.error("Failed to clean orphaned DHCP server %(id)s. "
                      "Exception: %(e)s", {'id': server['id'], 'e': e})


registry.subscribe(nsx_list_orphaned_dhcp_servers,
                   constants.ORPHANED_DHCP_SERVERS,
                   shell.Operations.NSX_LIST.value)
registry.subscribe(nsx_clean_orphaned_dhcp_servers,
                   constants.ORPHANED_DHCP_SERVERS,
                   shell.Operations.NSX_CLEAN.value)
# Copyright 2015 VMware, Inc.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.db import api as db_api
from neutron.db import common_db_mixin as common_db
from neutron.db.models import securitygroup
from neutron.db import securitygroups_db
from neutron_lib.callbacks import registry
from neutron_lib import context as neutron_context
from oslo_log import log as logging

from vmware_nsx.db import db as nsx_db
from vmware_nsx.db import nsx_models
from vmware_nsx.extensions import providersecuritygroup as provider_sg
from vmware_nsx.extensions import securitygrouplogging as sg_logging
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import formatters
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxv3.resources import ports
from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils as v3_utils
from vmware_nsx.shell import resources as shell
from vmware_nsxlib.v3 import nsx_constants as consts
from vmware_nsxlib.v3 import security

LOG = logging.getLogger(__name__)


class NeutronSecurityGroupApi(securitygroups_db.SecurityGroupDbMixin,
                              common_db.CommonDbMixin):
    """Admin-context access to neutron security-group DB tables."""

    def __init__(self):
        # BUG FIX: the original code did `super(NeutronSecurityGroupApi,
        # self)` without calling __init__(), creating and discarding a
        # super object (a no-op) instead of initializing the base classes.
        super(NeutronSecurityGroupApi, self).__init__()
        self.context = neutron_context.get_admin_context()
        self.filters = v3_utils.get_plugin_filters(self.context)

    def get_security_groups(self):
        """Return all security groups visible under the plugin filters."""
        return super(NeutronSecurityGroupApi,
                     self).get_security_groups(self.context,
                                               filters=self.filters)

    def delete_security_group(self, sg_id):
        """Delete a security group from the neutron DB."""
        return super(NeutronSecurityGroupApi,
                     self).delete_security_group(self.context, sg_id)

    def get_nsgroup_id(self, sg_id):
        """Return the NSX NSGroup id mapped to a neutron security group."""
        return nsx_db.get_nsx_security_group_id(
            self.context.session, sg_id)

    def get_port_security_groups(self, port_id):
        """Return ids of security groups bound to the given port."""
        secgroups_bindings = self._get_port_security_group_bindings(
            self.context, {'port_id': [port_id]})
        return [b['security_group_id'] for b in secgroups_bindings]

    def get_ports_in_security_group(self, security_group_id):
        """Return ids of ports bound to the given security group."""
        secgroups_bindings = self._get_port_security_group_bindings(
            self.context, {'security_group_id': [security_group_id]})
        return [b['port_id'] for b in secgroups_bindings]

    def delete_security_group_section_mapping(self, sg_id):
        """Remove the neutron-sg -> NSX firewall-section DB mapping."""
        with db_api.context_manager.writer.using(self.context):
            fw_mapping = self.context.session.query(
                nsx_models.NeutronNsxFirewallSectionMapping).filter_by(
                    neutron_id=sg_id).one_or_none()
            if fw_mapping:
                self.context.session.delete(fw_mapping)

    def delete_security_group_backend_mapping(self, sg_id):
        """Remove the neutron-sg -> NSX NSGroup DB mapping."""
        with db_api.context_manager.writer.using(self.context):
            sg_mapping = self.context.session.query(
                nsx_models.NeutronNsxSecurityGroupMapping).filter_by(
                    neutron_id=sg_id).one_or_none()
            if sg_mapping:
                self.context.session.delete(sg_mapping)

    def get_security_groups_mappings(self):
        """Return per-sg dicts with both NSX section and NSGroup ids."""
        q = self.context.session.query(
            securitygroup.SecurityGroup.name,
            securitygroup.SecurityGroup.id,
            nsx_models.NeutronNsxFirewallSectionMapping.nsx_id,
            nsx_models.NeutronNsxSecurityGroupMapping.nsx_id).join(
                nsx_models.NeutronNsxFirewallSectionMapping,
                nsx_models.NeutronNsxSecurityGroupMapping).all()
        sg_mappings = [{'name': mapp[0],
                        'id': mapp[1],
                        'section-id': mapp[2],
                        'nsx-securitygroup-id': mapp[3]}
                       for mapp in q]
        return sg_mappings

    def get_logical_port_id(self, port_id):
        """Return the NSX logical port id mapped to a neutron port."""
        mapping = self.context.session.query(
            nsx_models.NeutronNsxPortMapping).filter_by(
                neutron_id=port_id).one_or_none()
        if mapping:
            return mapping.nsx_id


neutron_sg = NeutronSecurityGroupApi()
neutron_db = v3_utils.NeutronDbClient()


def _log_info(resource, data, attrs=['display_name', 'id']):
    # Default list is never mutated, so the mutable default is safe here.
    LOG.info(formatters.output_formatter(resource, data, attrs))


@admin_utils.list_handler(constants.SECURITY_GROUPS)
@admin_utils.output_header
def list_security_groups_mappings(resource, event, trigger, **kwargs):
    """List neutron security groups with their NSX backend mappings."""
    sg_mappings = neutron_sg.get_security_groups_mappings()
    _log_info(constants.SECURITY_GROUPS,
              sg_mappings,
              attrs=['name', 'id', 'section-id', 'nsx-securitygroup-id'])
    return bool(sg_mappings)


@admin_utils.list_handler(constants.FIREWALL_SECTIONS)
@admin_utils.output_header
def nsx_list_dfw_sections(resource, event, trigger, **kwargs):
    """List the NSX backend firewall sections."""
    nsxlib = v3_utils.get_connected_nsxlib()
    fw_sections = nsxlib.firewall_section.list()
    _log_info(constants.FIREWALL_SECTIONS, fw_sections)
    return bool(fw_sections)


@admin_utils.list_handler(constants.FIREWALL_NSX_GROUPS)
@admin_utils.output_header
def nsx_list_security_groups(resource, event, trigger, **kwargs):
    """List the NSX backend NSGroups."""
    nsxlib = v3_utils.get_connected_nsxlib()
    nsx_secgroups = nsxlib.ns_group.list()
    _log_info(constants.FIREWALL_NSX_GROUPS, nsx_secgroups)
    return bool(nsx_secgroups)


def _find_missing_security_groups():
    """Map sg-id -> mapping for sgs whose NSGroup is gone on the backend."""
    nsxlib = v3_utils.get_connected_nsxlib()
    nsx_secgroups = nsxlib.ns_group.list()
    sg_mappings = neutron_sg.get_security_groups_mappings()
    missing_secgroups = {}
    for sg_db in sg_mappings:
        for nsx_sg in nsx_secgroups:
            if nsx_sg['id'] == sg_db['nsx-securitygroup-id']:
                break
        else:
            missing_secgroups[sg_db['id']] = sg_db
    return missing_secgroups


@admin_utils.list_mismatches_handler(constants.FIREWALL_NSX_GROUPS)
@admin_utils.output_header
def list_missing_security_groups(resource, event, trigger, **kwargs):
    """List neutron sgs whose NSX NSGroup is missing on the backend."""
    sgs_with_missing_nsx_group = _find_missing_security_groups()
    missing_securitygroups_info = [
        {'securitygroup-name': sg['name'],
         'securitygroup-id': sg['id'],
         'nsx-securitygroup-id':
         sg['nsx-securitygroup-id']}
        for sg in sgs_with_missing_nsx_group.values()]
    _log_info(constants.FIREWALL_NSX_GROUPS, missing_securitygroups_info,
              attrs=['securitygroup-name', 'securitygroup-id',
                     'nsx-securitygroup-id'])
    return bool(missing_securitygroups_info)


def _find_missing_sections():
    """Map sg-id -> mapping for sgs whose firewall section is gone."""
    nsxlib = v3_utils.get_connected_nsxlib()
    fw_sections = nsxlib.firewall_section.list()
    sg_mappings = neutron_sg.get_security_groups_mappings()
    missing_sections = {}
    for sg_db in sg_mappings:
        for fw_section in fw_sections:
            if fw_section['id'] == sg_db['section-id']:
                break
        else:
            missing_sections[sg_db['id']] = sg_db
    return missing_sections


@admin_utils.list_mismatches_handler(constants.FIREWALL_SECTIONS)
@admin_utils.output_header
def list_missing_firewall_sections(resource, event, trigger, **kwargs):
    """List neutron sgs whose NSX firewall section is missing."""
    sgs_with_missing_section = _find_missing_sections()
    missing_sections_info = [{'securitygroup-name': sg['name'],
                              'securitygroup-id': sg['id'],
                              'section-id': sg['section-id']}
                             for sg in sgs_with_missing_section.values()]
    _log_info(constants.FIREWALL_SECTIONS, missing_sections_info,
              attrs=['securitygroup-name', 'securitygroup-id', 'section-id'])
    return bool(missing_sections_info)


@admin_utils.fix_mismatches_handler(constants.SECURITY_GROUPS)
@admin_utils.output_header
def fix_security_groups(resource, event, trigger, **kwargs):
    """Recreate backend NSGroup + firewall section for inconsistent sgs."""
    context_ = neutron_context.get_admin_context()
    inconsistent_secgroups = _find_missing_sections()
    inconsistent_secgroups.update(_find_missing_security_groups())

    nsxlib = v3_utils.get_connected_nsxlib()
    with v3_utils.NsxV3PluginWrapper() as plugin:
        for sg_id, sg in inconsistent_secgroups.items():
            secgroup = plugin.get_security_group(context_, sg_id)

            try:
                # FIXME(roeyc): try..except clause should be removed once the
                # api will return 404 response code instead 400 for trying to
                # delete a non-existing firewall section.
                nsxlib.firewall_section.delete(sg['section-id'])
            except Exception:
                pass

            nsxlib.ns_group.delete(sg['nsx-securitygroup-id'])
            neutron_sg.delete_security_group_section_mapping(sg_id)
            neutron_sg.delete_security_group_backend_mapping(sg_id)
            nsgroup, fw_section = (
                plugin._create_security_group_backend_resources(secgroup))
            nsx_db.save_sg_mappings(
                context_, sg_id, nsgroup['id'], fw_section['id'])
            # If version > 1.1 then we use dynamic criteria tags, and the port
            # should already have them.
            if not nsxlib.feature_supported(consts.FEATURE_DYNAMIC_CRITERIA):
                members = []
                for port_id in neutron_sg.get_ports_in_security_group(sg_id):
                    lport_id = neutron_sg.get_logical_port_id(port_id)
                    members.append(lport_id)
                nsxlib.ns_group.add_members(
                    nsgroup['id'], consts.TARGET_TYPE_LOGICAL_PORT, members)

            for rule in secgroup['security_group_rules']:
                rule_mapping = (context_.session.query(
                    nsx_models.NeutronNsxRuleMapping).filter_by(
                        neutron_id=rule['id']).one())
                with context_.session.begin(subtransactions=True):
                    context_.session.delete(rule_mapping)
            action = (consts.FW_ACTION_DROP
                      if secgroup.get(provider_sg.PROVIDER)
                      else consts.FW_ACTION_ALLOW)
            rules = plugin._create_firewall_rules(
                context_, fw_section['id'], nsgroup['id'],
                secgroup.get(sg_logging.LOGGING, False), action,
                secgroup['security_group_rules'])
            plugin.save_security_group_rule_mappings(context_,
                                                     rules['rules'])


def _update_ports_dynamic_criteria_tags():
    """Tag every sg-member port so dynamic NSGroup criteria match it."""
    nsxlib = v3_utils.get_connected_nsxlib()
    port_client, _ = ports.get_port_and_profile_clients()
    for port in neutron_db.get_ports():
        secgroups = neutron_sg.get_port_security_groups(port['id'])
        # Nothing to do with ports that are not associated with any sec-group.
        if not secgroups:
            continue

        _, lport_id = neutron_db.get_lswitch_and_lport_id(port['id'])
        criteria_tags = nsxlib.ns_group.get_lport_tags(secgroups)
        port_client.update(lport_id, False, tags_update=criteria_tags)


def _update_security_group_dynamic_criteria():
    """Switch each NSGroup to tag-based dynamic membership criteria."""
    nsxlib = v3_utils.get_connected_nsxlib()
    secgroups = neutron_sg.get_security_groups()
    for sg in secgroups:
        nsgroup_id = neutron_sg.get_nsgroup_id(sg['id'])
        membership_criteria = nsxlib.ns_group.get_port_tag_expression(
            security.PORT_SG_SCOPE, sg['id'])
        try:
            # We want to add the dynamic criteria and remove all direct members
            # they will be added by the manager using the new criteria.
            nsxlib.ns_group.update(nsgroup_id,
                                   membership_criteria=membership_criteria,
                                   members=[])
        except Exception as e:
            LOG.warning("Failed to update membership criteria for nsgroup "
                        "%(nsgroup_id)s, request to backend returned "
                        "with error: %(error)s",
                        {'nsgroup_id': nsgroup_id, 'error': str(e)})


@admin_utils.output_header
def migrate_nsgroups_to_dynamic_criteria(resource, event, trigger, **kwargs):
    """Migrate NSGroups to dynamic criteria membership (NSX >= 1.1)."""
    nsxlib = v3_utils.get_connected_nsxlib()
    if not nsxlib.feature_supported(consts.FEATURE_DYNAMIC_CRITERIA):
        LOG.error("Dynamic criteria grouping feature isn't supported by "
                  "this NSX version.")
        return
    # First, we add the criteria tags for all ports.
    _update_ports_dynamic_criteria_tags()
    # Update security-groups with dynamic criteria and remove direct members.
    _update_security_group_dynamic_criteria()


registry.subscribe(migrate_nsgroups_to_dynamic_criteria,
                   constants.FIREWALL_NSX_GROUPS,
                   shell.Operations.MIGRATE_TO_DYNAMIC_CRITERIA.value)
registry.subscribe(fix_security_groups,
                   constants.FIREWALL_SECTIONS,
                   shell.Operations.NSX_UPDATE.value)
# Copyright 2016 VMware, Inc.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from vmware_nsx.plugins.nsx_v3 import cert_utils
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils
from vmware_nsx.shell import resources as shell
from vmware_nsxlib.v3 import client_cert
from vmware_nsxlib.v3 import exceptions
from vmware_nsxlib.v3 import trust_management

from neutron_lib.callbacks import registry
from neutron_lib import context
from oslo_config import cfg

LOG = logging.getLogger(__name__)

# Defaults for self-signed certificate generation; each entry can be
# overridden via --property <key>=<value>.
CERT_DEFAULTS = {'key-size': 2048,
                 'sig-alg': 'sha256',
                 'valid-days': 3650,
                 'country': 'US',
                 'state': 'California',
                 'org': 'default org',
                 'unit': 'default unit',
                 'host': 'defaulthost.org'}


def get_nsx_trust_management(**kwargs):
    """Build an NSX trust-management client, honoring user/password props."""
    username, password = None, None
    if kwargs.get('property'):
        properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
        username = properties.get('user')
        password = properties.get('password')

    nsx_client = utils.get_nsxv3_client(username, password, True)
    nsx_trust = trust_management.NsxLibTrustManagement(nsx_client, {})
    return nsx_trust


def get_certificate_manager(**kwargs):
    """Create a client-certificate manager with the configured storage."""
    storage_driver_type = cfg.CONF.nsx_v3.nsx_client_cert_storage.lower()
    LOG.info("Certificate storage is %s", storage_driver_type)
    if storage_driver_type == 'nsx-db':
        storage_driver = cert_utils.DbCertificateStorageDriver(
            context.get_admin_context())
    elif storage_driver_type == 'none':
        storage_driver = cert_utils.DummyCertificateStorageDriver()
    # TODO(annak) - add support for barbican storage driver
    # NOTE(review): any other storage type leaves storage_driver unbound
    # and raises NameError below — presumably config validation elsewhere
    # prevents that; confirm before relying on it.

    return client_cert.ClientCertificateManager(
        cert_utils.NSX_OPENSTACK_IDENTITY,
        get_nsx_trust_management(**kwargs),
        storage_driver)


def verify_client_cert_on():
    """Return True when client-certificate auth is enabled in config."""
    if cfg.CONF.nsx_v3.nsx_use_client_auth:
        return True

    LOG.info("Operation not applicable since client authentication "
             "is disabled")
    return False


@admin_utils.output_header
def generate_cert(resource, event, trigger, **kwargs):
    """Generate self signed client certificate and private key
    """
    if not verify_client_cert_on():
        return

    if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() == "none":
        LOG.info("Generate operation is not supported "
                 "with storage type 'none'")
        return

    # update cert defaults based on user input
    properties = CERT_DEFAULTS.copy()
    if kwargs.get('property'):
        properties.update(admin_utils.parse_multi_keyval_opt(
            kwargs['property']))

    try:
        prop = 'key-size'
        key_size = int(properties.get(prop))
        prop = 'valid-days'
        valid_for_days = int(properties.get(prop))
    except ValueError:
        LOG.info("%s property must be a number", prop)
        return

    signature_alg = properties.get('sig-alg')
    subject = {}
    subject[client_cert.CERT_SUBJECT_COUNTRY] = properties.get('country')
    subject[client_cert.CERT_SUBJECT_STATE] = properties.get('state')
    subject[client_cert.CERT_SUBJECT_ORG] = properties.get('org')
    # BUG FIX: this previously read properties.get('org'), so the 'unit'
    # property (and its default) was silently ignored in the subject OU.
    subject[client_cert.CERT_SUBJECT_UNIT] = properties.get('unit')
    subject[client_cert.CERT_SUBJECT_HOST] = properties.get('host')

    with get_certificate_manager(**kwargs) as cert:
        if cert.exists():
            LOG.info("Deleting existing certificate")
            # Need to delete cert first
            cert.delete()

        try:
            cert.generate(subject, key_size, valid_for_days, signature_alg)
        except exceptions.NsxLibInvalidInput as e:
            LOG.info(e)
            return

    LOG.info("Client certificate generated successfully")


@admin_utils.output_header
def delete_cert(resource, event, trigger, **kwargs):
    """Delete client certificate and private key """
    if not verify_client_cert_on():
        return

    with get_certificate_manager(**kwargs) as cert:
        if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() == "none":
            filename = get_cert_filename(**kwargs)
            if not filename:
                LOG.info("Please specify file containing the certificate "
                         "using filename property")
                return
            cert.delete_pem(filename)
        else:
            if not cert.exists():
                LOG.info("Nothing to clean")
                return

            cert.delete()
        LOG.info("Client certificate deleted successfully")


@admin_utils.output_header
def show_cert(resource, event, trigger, **kwargs):
    """Show client certificate details """
    if not verify_client_cert_on():
        return

    with get_certificate_manager(**kwargs) as cert:
        if cert.exists():
            cert_pem, key_pem = cert.get_pem()
            expires_on = cert.expires_on()
            expires_in_days = cert.expires_in_days()
            cert_data = cert.get_subject()
            cert_data['alg'] = cert.get_signature_alg()
            cert_data['key_size'] = cert.get_key_size()
            if expires_in_days >= 0:
                LOG.info("Client certificate is valid. "
                         "Expires on %(date)s UTC (in %(days)d days).",
                         {'date': expires_on,
                          'days': expires_in_days})
            else:
                LOG.info("Client certificate expired on %s.", expires_on)

            LOG.info("Key Size %(key_size)s, "
                     "Signature Algorithm %(alg)s\n"
                     "Subject: Country %(country)s, State %(state)s, "
                     "Organization %(organization)s, Unit %(unit)s, "
                     "Common Name %(hostname)s", cert_data)

            LOG.info(cert_pem)
        else:
            LOG.info("Client certificate is not registered "
                     "in storage")


def get_cert_filename(**kwargs):
    """Resolve the certificate PEM filename from props or configuration."""
    filename = cfg.CONF.nsx_v3.nsx_client_cert_file
    if kwargs.get('property'):
        properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
        filename = properties.get('filename', filename)
    if not filename:
        LOG.info("Please specify file containing the certificate "
                 "using filename property")
    return filename


@admin_utils.output_header
def import_cert(resource, event, trigger, **kwargs):
    """Import client certificate that was generated externally"""
    if not verify_client_cert_on():
        return

    if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() != "none":
        LOG.info("Import operation is supported "
                 "with storage type 'none' only")
        return

    with get_certificate_manager(**kwargs) as cert:
        if cert.exists():
            LOG.info("Deleting existing certificate")
            cert.delete()

        filename = get_cert_filename(**kwargs)
        if not filename:
            return
        cert.import_pem(filename)

    LOG.info("Client certificate imported successfully")


@admin_utils.output_header
def show_nsx_certs(resource, event, trigger, **kwargs):
    """Show client certificates associated with openstack identity in NSX"""
    # Note - this operation is supported even if the feature is disabled
    nsx_trust = get_nsx_trust_management(**kwargs)

    ids = nsx_trust.get_identities(cert_utils.NSX_OPENSTACK_IDENTITY)
    if not ids:
        LOG.info("Principal identity %s not found",
                 cert_utils.NSX_OPENSTACK_IDENTITY)
        return

    LOG.info("Certificate(s) associated with principal identity %s\n",
             cert_utils.NSX_OPENSTACK_IDENTITY)

    cert = None
    for identity in ids:
        if 'certificate_id' in identity:
            cert = nsx_trust.get_cert(identity['certificate_id'])
            LOG.info(cert['pem_encoded'])

    if not cert:
        LOG.info("No certificates found")


registry.subscribe(generate_cert,
                   constants.CERTIFICATE,
                   shell.Operations.GENERATE.value)
registry.subscribe(show_cert,
                   constants.CERTIFICATE,
                   shell.Operations.SHOW.value)
registry.subscribe(delete_cert,
                   constants.CERTIFICATE,
                   shell.Operations.CLEAN.value)
registry.subscribe(import_cert,
                   constants.CERTIFICATE,
                   shell.Operations.IMPORT.value)
registry.subscribe(show_nsx_certs,
                   constants.CERTIFICATE,
                   shell.Operations.NSX_LIST.value)
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from vmware_nsx.db import db as nsx_db from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils from vmware_nsx.shell import resources as shell from vmware_nsxlib.v3 import exceptions as nsx_exc from neutron.db import db_base_plugin_v2 from neutron_lib.callbacks import registry from neutron_lib import context as neutron_context from oslo_log import log as logging LOG = logging.getLogger(__name__) neutron_client = utils.NeutronDbClient() def get_network_nsx_id(context, neutron_id): # get the nsx switch id from the DB mapping mappings = nsx_db.get_nsx_switch_ids(context.session, neutron_id) if mappings and len(mappings) > 0: return mappings[0] @admin_utils.output_header def list_missing_networks(resource, event, trigger, **kwargs): """List neutron networks that are missing the NSX backend network """ nsxlib = utils.get_connected_nsxlib() plugin = db_base_plugin_v2.NeutronDbPluginV2() admin_cxt = neutron_context.get_admin_context() filters = utils.get_plugin_filters(admin_cxt) neutron_networks = plugin.get_networks(admin_cxt, filters=filters) networks = [] for net in neutron_networks: neutron_id = net['id'] # get the network nsx id from the mapping table nsx_id = get_network_nsx_id(admin_cxt, neutron_id) if not nsx_id: # skip external networks pass else: try: nsxlib.logical_switch.get(nsx_id) except nsx_exc.ResourceNotFound: networks.append({'name': net['name'], 
'neutron_id': neutron_id, 'nsx_id': nsx_id}) if len(networks) > 0: title = ("Found %d internal networks missing from the NSX " "manager:") % len(networks) LOG.info(formatters.output_formatter( title, networks, ['name', 'neutron_id', 'nsx_id'])) else: LOG.info("All internal networks exist on the NSX manager") @admin_utils.output_header def list_orphaned_networks(resource, event, trigger, **kwargs): nsxlib = utils.get_connected_nsxlib() nsx_switches = nsxlib.logical_switch.list()['results'] missing_networks = [] for nsx_switch in nsx_switches: # check if it exists in the neutron DB if not neutron_client.lswitch_id_to_net_id(nsx_switch['id']): # Skip non-neutron networks, by tags neutron_net = False for tag in nsx_switch.get('tags', []): if tag.get('scope') == 'os-neutron-net-id': neutron_net = True break if neutron_net: missing_networks.append(nsx_switch) LOG.info(formatters.output_formatter(constants.ORPHANED_NETWORKS, missing_networks, ['id', 'display_name'])) @admin_utils.output_header def delete_backend_network(resource, event, trigger, **kwargs): errmsg = ("Need to specify nsx-id property. 
Add --property nsx-id=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) nsx_id = properties.get('nsx-id') if not nsx_id: LOG.error("%s", errmsg) return nsxlib = utils.get_connected_nsxlib() # check if the network exists try: nsxlib.logical_switch.get(nsx_id, silent=True) except nsx_exc.ResourceNotFound: # prevent logger from logging this exception sys.exc_clear() LOG.warning("Backend network %s was not found.", nsx_id) return # try to delete it try: nsxlib.logical_switch.delete(nsx_id) except Exception as e: LOG.error("Failed to delete backend network %(id)s : %(e)s.", { 'id': nsx_id, 'e': e}) return # Verify that the network was deleted since the backend does not always # through errors try: nsxlib.logical_switch.get(nsx_id, silent=True) except nsx_exc.ResourceNotFound: # prevent logger from logging this exception sys.exc_clear() LOG.info("Backend network %s was deleted.", nsx_id) else: LOG.error("Failed to delete backend network %s.", nsx_id) registry.subscribe(list_missing_networks, constants.NETWORKS, shell.Operations.LIST_MISMATCHES.value) registry.subscribe(list_orphaned_networks, constants.ORPHANED_NETWORKS, shell.Operations.LIST.value) registry.subscribe(delete_backend_network, constants.ORPHANED_NETWORKS, shell.Operations.NSX_CLEAN.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv3/resources/metadata_proxy.py0000666000175100017510000002263013244523413030473 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import netaddr from neutron_lib.callbacks import registry from neutron_lib import constants as const from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import config # noqa from vmware_nsx.common import utils as nsx_utils from vmware_nsx.dhcp_meta import rpc as nsx_rpc from vmware_nsx.plugins.nsx_v3 import availability_zones as nsx_az from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils import vmware_nsx.shell.resources as shell from vmware_nsxlib.v3 import exceptions as nsx_exc from vmware_nsxlib.v3 import nsx_constants LOG = logging.getLogger(__name__) neutron_client = utils.NeutronDbClient() def _is_metadata_network(network): # If a Neutron network has only one subnet with 169.254.169.252/30 CIDR, # then it is an internal metadata network. 
if len(network['subnets']) == 1: subnet = neutron_client.get_subnet(network['subnets'][0]) if subnet['cidr'] == nsx_rpc.METADATA_SUBNET_CIDR: return True return False @admin_utils.output_header def list_metadata_networks(resource, event, trigger, **kwargs): """List Metadata networks in Neutron.""" if not cfg.CONF.nsx_v3.native_metadata_route: meta_networks = [network for network in neutron_client.get_networks() if _is_metadata_network(network)] LOG.info(formatters.output_formatter(constants.METADATA_PROXY, meta_networks, ['id', 'name', 'subnets'])) else: nsxlib = utils.get_connected_nsxlib() tags = [{'scope': 'os-neutron-net-id'}] ports = nsxlib.search_by_tags(resource_type='LogicalPort', tags=tags) for port in ports['results']: if port['attachment']['attachment_type'] == 'METADATA_PROXY': net_id = None for tag in port['tags']: if tag['scope'] == 'os-neutron-net-id': net_id = tag['tag'] break uri = '/md-proxies/%s/%s/status' % (port['attachment']['id'], port['logical_switch_id']) status = nsxlib.client.get(uri) LOG.info("Status for MD proxy on neutron network %s (logical " "switch %s) is %s", net_id, port['logical_switch_id'], status.get('proxy_status', 'Unknown')) @admin_utils.output_header def nsx_update_metadata_proxy(resource, event, trigger, **kwargs): """Update Metadata proxy for NSXv3 CrossHairs.""" nsxlib = utils.get_connected_nsxlib() nsx_version = nsxlib.get_version() if not nsx_utils.is_nsx_version_1_1_0(nsx_version): LOG.error("This utility is not available for NSX version %s", nsx_version) return metadata_proxy_uuid = None if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) metadata_proxy_uuid = properties.get('metadata_proxy_uuid') if not metadata_proxy_uuid: LOG.error("metadata_proxy_uuid is not defined") return cfg.CONF.set_override('dhcp_agent_notification', False) cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') cfg.CONF.set_override('metadata_proxy', metadata_proxy_uuid, 'nsx_v3') with 
utils.NsxV3PluginWrapper() as plugin: # For each Neutron network, check if it is an internal metadata # network. # If yes, delete the network and associated router interface. # Otherwise, create a logical switch port with MD-Proxy attachment. for network in neutron_client.get_networks(): if _is_metadata_network(network): # It is a metadata network, find the attached router, # remove the router interface and the network. filters = {'device_owner': const.ROUTER_INTERFACE_OWNERS, 'fixed_ips': { 'subnet_id': [network['subnets'][0]], 'ip_address': [nsx_rpc.METADATA_GATEWAY_IP]}} ports = neutron_client.get_ports(filters=filters) if not ports: continue router_id = ports[0]['device_id'] interface = {'subnet_id': network['subnets'][0]} plugin.remove_router_interface(router_id, interface) LOG.info("Removed metadata interface on router %s", router_id) plugin.delete_network(network['id']) LOG.info("Removed metadata network %s", network['id']) else: lswitch_id = neutron_client.net_id_to_lswitch_id( network['id']) if not lswitch_id: continue tags = nsxlib.build_v3_tags_payload( network, resource_type='os-neutron-net-id', project_name='admin') name = nsx_utils.get_name_and_uuid('%s-%s' % ( 'mdproxy', network['name'] or 'network'), network['id']) # check if this logical port already exists existing_ports = nsxlib.logical_port.find_by_display_name( name) if not existing_ports: # create a new port with the md-proxy nsxlib.logical_port.create( lswitch_id, metadata_proxy_uuid, tags=tags, name=name, attachment_type=nsx_constants.ATTACHMENT_MDPROXY) LOG.info("Enabled native metadata proxy for network %s", network['id']) else: # update the MDproxy of this port port = existing_ports[0] nsxlib.logical_port.update( port['id'], metadata_proxy_uuid, attachment_type=nsx_constants.ATTACHMENT_MDPROXY) LOG.info("Updated native metadata proxy for network %s", network['id']) @admin_utils.output_header def nsx_update_metadata_proxy_server_ip(resource, event, trigger, **kwargs): """Update Metadata 
proxy server ip on the nsx.""" nsxlib = utils.get_connected_nsxlib() nsx_version = nsxlib.get_version() if not nsx_utils.is_nsx_version_1_1_0(nsx_version): LOG.error("This utility is not available for NSX version %s", nsx_version) return server_ip = None az_name = nsx_az.DEFAULT_NAME if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) server_ip = properties.get('server-ip') az_name = properties.get('availability-zone', az_name) if not server_ip or not netaddr.valid_ipv4(server_ip): LOG.error("Need to specify a valid server-ip parameter") return config.register_nsxv3_azs(cfg.CONF, cfg.CONF.nsx_v3.availability_zones) if (az_name != nsx_az.DEFAULT_NAME and az_name not in cfg.CONF.nsx_v3.availability_zones): LOG.error("Availability zone %s was not found in the configuration", az_name) return az = nsx_az.NsxV3AvailabilityZones().get_availability_zone(az_name) az.translate_configured_names_to_uuids(nsxlib) if (not az.metadata_proxy or not cfg.CONF.nsx_v3.native_dhcp_metadata): LOG.error("Native DHCP metadata is not enabled in the configuration " "of availability zone %s", az_name) return metadata_proxy_uuid = az._native_md_proxy_uuid try: mdproxy = nsxlib.native_md_proxy.get(metadata_proxy_uuid) except nsx_exc.ResourceNotFound: LOG.error("metadata proxy %s not found", metadata_proxy_uuid) return # update the IP in the URL url = mdproxy.get('metadata_server_url') url = re.sub(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', server_ip, url) LOG.info("Updating the URL of the metadata proxy server %(uuid)s to " "%(url)s", {'uuid': metadata_proxy_uuid, 'url': url}) nsxlib.native_md_proxy.update(metadata_proxy_uuid, server_url=url) LOG.info("Done.") registry.subscribe(list_metadata_networks, constants.METADATA_PROXY, shell.Operations.LIST.value) registry.subscribe(nsx_update_metadata_proxy, constants.METADATA_PROXY, shell.Operations.NSX_UPDATE.value) registry.subscribe(nsx_update_metadata_proxy_server_ip, constants.METADATA_PROXY, 
shell.Operations.NSX_UPDATE_IP.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxv3/__init__.py0000666000175100017510000000000013244523345025166 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/common/0000775000175100017510000000000013244524600023267 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/common/utils.py0000666000175100017510000001024713244523345025014 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import six from vmware_nsx._i18n import _ from vmware_nsx.db import db from vmware_nsx.shell import resources as nsxadmin from neutron.common import profiler # noqa from neutron_lib.callbacks import registry from oslo_log import log as logging LOG = logging.getLogger(__name__) def output_header(func): """Decorator to demarcate the output of various hooks. Based on the callback function name we add a header to the cli output. 
Callback name's should follow the convention of component_operation_it_does to leverage the decorator """ def func_desc(*args, **kwargs): component = '[%s]' % func.__name__.split('_')[0].upper() op_desc = [n.capitalize() for n in func.__name__.split('_')[1:]] LOG.info('==== %(component)s %(operation)s ====', {'component': component, 'operation': ' '.join(op_desc)}) return func(*args, **kwargs) func_desc.__name__ = func.__name__ return func_desc def parse_multi_keyval_opt(opt_list): """Converts a MutliStrOpt to a key-value dict""" result = dict() opt_list = opt_list if opt_list else [] for opt_value in opt_list: try: key, value = opt_value.split('=') result[key] = value except ValueError: raise ValueError(_("Illegal argument [%s]: input should have the " "format of '--property key=value'") % opt_value) return result def query_yes_no(question, default="yes"): """Ask a yes/no question via raw_input() and return their answer. "question" is a string that is presented to the user. "default" is the presumed answer if the user just hits . It must be "yes" (the default), "no" or None (meaning an answer is required of the user). The "answer" return value is True for "yes" or False for "no". 
""" valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} if default is None: prompt = " [y/n] " elif default == "yes": prompt = " [Y/n] " elif default == "no": prompt = " [y/N] " else: raise ValueError(_("invalid default answer: '%s'") % default) while True: sys.stdout.write(question + prompt) choice = six.moves.input().lower() if default is not None and choice == '': return valid[default] elif choice in valid: return valid[choice] else: sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n") def list_handler(resource): def wrap(func): registry.subscribe(func, resource, nsxadmin.Operations.LIST.value) return func return wrap def list_mismatches_handler(resource): def wrap(func): registry.subscribe(func, resource, nsxadmin.Operations.LIST_MISMATCHES.value) return func return wrap def fix_mismatches_handler(resource): def wrap(func): registry.subscribe(func, resource, nsxadmin.Operations.FIX_MISMATCH.value) return func return wrap def get_plugin_filters(context, plugin): # Return filters for the neutron list apis so that only resources from # a specific plugin will be returned. filters = {} core_plugin = nsxadmin.get_plugin() if core_plugin == 'nsxtvd': maps = db.get_project_plugin_mappings_by_plugin( context.session, plugin) if maps: filters['project_id'] = [m.project for m in maps] return filters vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/common/__init__.py0000666000175100017510000000000013244523345025375 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/common/constants.py0000666000175100017510000000411513244523345025665 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Default conf file locations NEUTRON_CONF = '/etc/neutron/neutron.conf' NSX_INI = '/etc/neutron/plugins/vmware/nsx.ini' # NSX Plugin Constants NSXV3_PLUGIN = 'vmware_nsx.plugin.NsxV3Plugin' NSXV_PLUGIN = 'vmware_nsx.plugin.NsxVPlugin' NSXTVD_PLUGIN = 'vmware_nsx.plugin.NsxTVDPlugin' VMWARE_NSXV = 'vmware_nsxv' VMWARE_NSXV3 = 'vmware_nsxv3' VMWARE_NSXTVD = 'vmware_nsxtvd' # Common Resource Constants NETWORKS = 'networks' ROUTERS = 'routers' DHCP_BINDING = 'dhcp-binding' FIREWALL_SECTIONS = 'firewall-sections' FIREWALL_NSX_GROUPS = 'nsx-security-groups' SECURITY_GROUPS = 'security-groups' CONFIG = 'config' ORPHANED_NETWORKS = 'orphaned-networks' ORPHANED_ROUTERS = 'orphaned-routers' # NSXV3 only Resource Constants PORTS = 'ports' METADATA_PROXY = 'metadata-proxy' ORPHANED_DHCP_SERVERS = 'orphaned-dhcp-servers' CERTIFICATE = 'certificate' LB_SERVICES = 'lb-services' LB_VIRTUAL_SERVERS = 'lb-virtual-servers' LB_POOLS = 'lb-pools' LB_MONITORS = 'lb-monitors' RATE_LIMIT = 'rate-limit' # NSXV only Resource Constants EDGES = 'edges' SPOOFGUARD_POLICY = 'spoofguard-policy' BACKUP_EDGES = 'backup-edges' ORPHANED_EDGES = 'orphaned-edges' ORPHANED_BINDINGS = 'orphaned-bindings' ORPHANED_VNICS = 'orphaned-vnics' MISSING_EDGES = 'missing-edges' METADATA = 'metadata' MISSING_NETWORKS = 'missing-networks' LBAAS = 'lbaas' BGP_GW_EDGE = 'bgp-gw-edge' ROUTING_REDIS_RULE = 'routing-redistribution-rule' BGP_NEIGHBOUR = 'bgp-neighbour' # NSXTV only Resource Constants PROJECTS = 'projects' 
vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/common/formatters.py0000666000175100017510000000434713244523345026046 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils import prettytable LOG = logging.getLogger(__name__) def output_formatter(resource_name, resources_list, attrs): """Method to format the output response from NSX/Neutron. Depending on the --fmt cli option we format the output as JSON or as a table. """ LOG.info('%(resource_name)s', {'resource_name': resource_name}) if not resources_list: LOG.info('No resources found') return '' fmt = cfg.CONF.fmt if fmt == 'psql': tableout = prettytable.PrettyTable(attrs) tableout.padding_width = 1 tableout.align = "l" for resource in resources_list: resource_list = [] for attr in attrs: resource_list.append(resource.get(attr)) tableout.add_row(resource_list) return tableout elif fmt == 'json': js_output = {} js_output[resource_name] = [] for resource in resources_list: result = {} for attr in attrs: result[attr] = resource[attr] js_output[resource_name].append(result) return jsonutils.dumps(js_output, sort_keys=True, indent=4) def tabulate_results(data): """Method to format the data in a tabular format. Expects a list of tuple with the first tuple in the list; being treated as column headers. 
""" columns = data.pop(0) table = prettytable.PrettyTable(["%s" % col for col in columns]) for contents in data: table.add_row(["%s" % col for col in contents]) return table vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxtvd/0000775000175100017510000000000013244524600023325 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxtvd/resources/0000775000175100017510000000000013244524600025337 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxtvd/resources/__init__.py0000666000175100017510000000000013244523345027445 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxtvd/resources/migrate.py0000666000175100017510000000414513244523413027350 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from neutron_lib.callbacks import registry from neutron_lib import context from vmware_nsx.db import db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) @admin_utils.output_header def migrate_projects(resource, event, trigger, **kwargs): """Import existing openstack projects to the current plugin""" # TODO(asarfaty): get the projects list from keystone # get the plugin name from the user if not kwargs.get('property'): LOG.error("Need to specify plugin and project parameters") return else: properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) plugin = properties.get('plugin') project = properties.get('project') if not plugin or not project: LOG.error("Need to specify plugin and project parameters") return if plugin not in projectpluginmap.VALID_TYPES: LOG.error("The supported plugins are %s", projectpluginmap.VALID_TYPES) return ctx = context.get_admin_context() if not db.get_project_plugin_mapping(ctx.session, project): db.add_project_plugin_mapping(ctx.session, project, plugin) registry.subscribe(migrate_projects, constants.PROJECTS, shell.Operations.IMPORT.value) vmware-nsx-12.0.1/vmware_nsx/shell/admin/plugins/nsxtvd/__init__.py0000666000175100017510000000000013244523345025433 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/admin/__init__.py0000666000175100017510000000000013244523345022424 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/shell/resources.py0000666000175100017510000003510013244523413021614 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import enum import glob import importlib import os from oslo_config import cfg from oslo_log import log as logging import requests from vmware_nsx.common import config # noqa from vmware_nsx.shell.admin.plugins.common import constants # Suppress the Insecure request warning requests.packages.urllib3.disable_warnings() LOG = logging.getLogger(__name__) class Operations(enum.Enum): LIST = 'list' CLEAN = 'clean' CLEAN_ALL = 'clean-all' CREATE = 'create' DELETE = 'delete' LIST_MISMATCHES = 'list-mismatches' FIX_MISMATCH = 'fix-mismatch' NEUTRON_LIST = 'neutron-list' NEUTRON_CLEAN = 'neutron-clean' NEUTRON_UPDATE = 'neutron-update' NSX_LIST = 'nsx-list' NSX_CLEAN = 'nsx-clean' NSX_UPDATE = 'nsx-update' NSX_UPDATE_ALL = 'nsx-update-all' NSX_UPDATE_SECRET = 'nsx-update-secret' NSX_UPDATE_RULES = 'nsx-update-rules' NSX_UPDATE_DHCP_RELAY = 'nsx-update-dhcp-relay' NSX_UPDATE_IP = 'nsx-update-ip' NSX_RECREATE = 'nsx-recreate' NSX_REDISTRIBURE = 'nsx-redistribute' NSX_REORDER = 'nsx-reorder' NSX_TAG_DEFAULT = 'nsx-tag-default' MIGRATE_TO_DYNAMIC_CRITERIA = 'migrate-to-dynamic-criteria' NSX_MIGRATE_V_V3 = 'nsx-migrate-v-v3' MIGRATE_TO_POLICY = 'migrate-to-policy' NSX_MIGRATE_EXCLUDE_PORTS = 'migrate-exclude-ports' MIGRATE_VDR_DHCP = 'migrate-vdr-dhcp' STATUS = 'status' GENERATE = 'generate' IMPORT = 'import' SHOW = 'show' VALIDATE = 'validate' ops = [op.value for op in Operations] class Resource(object): def __init__(self, name, ops): self.name = name self.supported_ops = ops # Add supported NSX-V3 resources in this dictionary nsxv3_resources = { constants.SECURITY_GROUPS: 
Resource(constants.SECURITY_GROUPS, [Operations.LIST.value, Operations.FIX_MISMATCH.value]), constants.FIREWALL_SECTIONS: Resource(constants.FIREWALL_SECTIONS, [Operations.LIST.value, Operations.LIST_MISMATCHES.value]), constants.FIREWALL_NSX_GROUPS: Resource( constants.FIREWALL_NSX_GROUPS, [ Operations.LIST.value, Operations.LIST_MISMATCHES.value, Operations.MIGRATE_TO_DYNAMIC_CRITERIA.value]), constants.NETWORKS: Resource(constants.NETWORKS, [Operations.LIST_MISMATCHES.value]), constants.PORTS: Resource(constants.PORTS, [Operations.LIST_MISMATCHES.value, Operations.NSX_TAG_DEFAULT.value, Operations.NSX_MIGRATE_V_V3.value, Operations.NSX_MIGRATE_EXCLUDE_PORTS.value]), constants.ROUTERS: Resource(constants.ROUTERS, [Operations.LIST_MISMATCHES.value, Operations.NSX_UPDATE_RULES.value, Operations.NSX_UPDATE_DHCP_RELAY.value]), constants.DHCP_BINDING: Resource(constants.DHCP_BINDING, [Operations.LIST.value, Operations.NSX_UPDATE.value, Operations.NSX_RECREATE.value]), constants.METADATA_PROXY: Resource(constants.METADATA_PROXY, [Operations.LIST.value, Operations.NSX_UPDATE.value, Operations.NSX_UPDATE_IP.value]), constants.ORPHANED_DHCP_SERVERS: Resource(constants.ORPHANED_DHCP_SERVERS, [Operations.NSX_LIST.value, Operations.NSX_CLEAN.value]), constants.CERTIFICATE: Resource(constants.CERTIFICATE, [Operations.GENERATE.value, Operations.SHOW.value, Operations.CLEAN.value, Operations.IMPORT.value, Operations.NSX_LIST.value]), constants.CONFIG: Resource(constants.CONFIG, [Operations.VALIDATE.value]), constants.ORPHANED_NETWORKS: Resource(constants.ORPHANED_NETWORKS, [Operations.LIST.value, Operations.NSX_CLEAN.value]), constants.ORPHANED_ROUTERS: Resource(constants.ORPHANED_ROUTERS, [Operations.LIST.value, Operations.NSX_CLEAN.value]), constants.LB_SERVICES: Resource(constants.LB_SERVICES, [Operations.LIST.value]), constants.LB_VIRTUAL_SERVERS: Resource(constants.LB_VIRTUAL_SERVERS, [Operations.LIST.value]), constants.LB_POOLS: Resource(constants.LB_POOLS, 
[Operations.LIST.value]), constants.LB_MONITORS: Resource(constants.LB_MONITORS, [Operations.LIST.value]), constants.RATE_LIMIT: Resource(constants.RATE_LIMIT, [Operations.SHOW.value, Operations.NSX_UPDATE.value]) } # Add supported NSX-V resources in this dictionary nsxv_resources = { constants.EDGES: Resource(constants.EDGES, [Operations.NSX_LIST.value, Operations.NEUTRON_LIST.value, Operations.NSX_UPDATE.value, Operations.NSX_UPDATE_ALL.value]), constants.BACKUP_EDGES: Resource(constants.BACKUP_EDGES, [Operations.LIST.value, Operations.CLEAN.value, Operations.CLEAN_ALL.value, Operations.LIST_MISMATCHES.value, Operations.FIX_MISMATCH.value, Operations.NEUTRON_CLEAN.value]), constants.ORPHANED_EDGES: Resource(constants.ORPHANED_EDGES, [Operations.LIST.value, Operations.CLEAN.value]), constants.ORPHANED_BINDINGS: Resource(constants.ORPHANED_BINDINGS, [Operations.LIST.value, Operations.CLEAN.value]), constants.MISSING_EDGES: Resource(constants.MISSING_EDGES, [Operations.LIST.value]), constants.SPOOFGUARD_POLICY: Resource(constants.SPOOFGUARD_POLICY, [Operations.LIST.value, Operations.CLEAN.value]), constants.DHCP_BINDING: Resource(constants.DHCP_BINDING, [Operations.LIST.value, Operations.NSX_UPDATE.value, Operations.NSX_REDISTRIBURE.value, Operations.NSX_RECREATE.value]), constants.NETWORKS: Resource(constants.NETWORKS, [Operations.LIST.value, Operations.NSX_UPDATE.value]), constants.MISSING_NETWORKS: Resource(constants.MISSING_NETWORKS, [Operations.LIST.value]), constants.ORPHANED_NETWORKS: Resource(constants.ORPHANED_NETWORKS, [Operations.LIST.value, Operations.NSX_CLEAN.value]), constants.SECURITY_GROUPS: Resource(constants.SECURITY_GROUPS, [Operations.LIST.value, Operations.FIX_MISMATCH.value, Operations.MIGRATE_TO_POLICY.value]), constants.FIREWALL_NSX_GROUPS: Resource( constants.FIREWALL_NSX_GROUPS, [Operations.LIST.value, Operations.LIST_MISMATCHES.value]), constants.FIREWALL_SECTIONS: Resource(constants.FIREWALL_SECTIONS, [Operations.LIST.value, 
Operations.LIST_MISMATCHES.value, Operations.NSX_UPDATE.value, Operations.NSX_REORDER.value]), constants.METADATA: Resource( constants.METADATA, [Operations.NSX_UPDATE.value, Operations.NSX_UPDATE_SECRET.value, Operations.STATUS.value]), constants.ROUTERS: Resource(constants.ROUTERS, [Operations.NSX_RECREATE.value, Operations.NSX_REDISTRIBURE.value, Operations.MIGRATE_VDR_DHCP.value]), constants.ORPHANED_VNICS: Resource(constants.ORPHANED_VNICS, [Operations.NSX_LIST.value, Operations.NSX_CLEAN.value]), constants.CONFIG: Resource(constants.CONFIG, [Operations.VALIDATE.value]), constants.BGP_GW_EDGE: Resource(constants.BGP_GW_EDGE, [Operations.CREATE.value, Operations.DELETE.value, Operations.LIST.value]), constants.ROUTING_REDIS_RULE: Resource(constants.ROUTING_REDIS_RULE, [Operations.CREATE.value, Operations.DELETE.value]), constants.BGP_NEIGHBOUR: Resource(constants.BGP_NEIGHBOUR, [Operations.CREATE.value, Operations.DELETE.value]) } # Add supported NSX-TVD resources in this dictionary # TODO(asarfaty): add v+v3 resources here too nsxtvd_resources = { constants.PROJECTS: Resource(constants.PROJECTS, [Operations.IMPORT.value]), } nsxv3_resources_names = list(nsxv3_resources.keys()) nsxv_resources_names = list(nsxv_resources.keys()) nsxtvd_resources_names = list(nsxtvd_resources.keys()) def get_resources(plugin_dir): modules = glob.glob(plugin_dir + "/*.py") return map(lambda module: os.path.splitext(os.path.basename(module))[0], modules) def get_plugin(): plugin = cfg.CONF.core_plugin plugin_name = '' if plugin in (constants.NSXV3_PLUGIN, constants.VMWARE_NSXV3): plugin_name = 'nsxv3' elif plugin in (constants.NSXV_PLUGIN, constants.VMWARE_NSXV): plugin_name = 'nsxv' elif plugin in (constants.NSXTVD_PLUGIN, constants.VMWARE_NSXTVD): plugin_name = 'nsxtvd' return plugin_name def _get_choices(): plugin = get_plugin() if plugin == 'nsxv3': return nsxv3_resources_names elif plugin == 'nsxv': return nsxv_resources_names elif plugin == 'nsxtvd': return 
nsxtvd_resources_names def _get_resources(): plugin = get_plugin() if plugin == 'nsxv3': return 'NSX-V3 resources: %s' % (', '.join(nsxv3_resources_names)) elif plugin == 'nsxv': return 'NSX-V resources: %s' % (', '.join(nsxv_resources_names)) elif plugin == 'nsxtvd': return 'NSX-TVD resources: %s' % (', '.join(nsxtvd_resources_names)) cli_opts = [cfg.StrOpt('fmt', short='f', default='psql', choices=['psql', 'json'], help='Supported output formats: json, psql'), cfg.StrOpt('resource', short='r', choices=_get_choices(), help=_get_resources()), cfg.StrOpt('operation', short='o', help='Supported list of operations: {}' .format(', '.join(ops))), cfg.StrOpt('plugin', help='nsxv or nsxv3 if the tvd plugin is used'), cfg.BoolOpt('force', default=False, help='Enables \'force\' mode. No confirmations will ' 'be made before deletions.'), cfg.MultiStrOpt('property', short='p', help='Key-value pair containing the information ' 'to be updated. For ex: key=value.'), cfg.BoolOpt('verbose', short='v', default=False, help='Triggers detailed output for some commands') ] # Describe dependencies between admin utils resources and external libraries # that are not always installed resources_dependencies = { 'nsxv': {'gw_edges': ['neutron_dynamic_routing.extensions']}} def verify_external_dependencies(plugin_name, resource): if plugin_name in resources_dependencies: deps = resources_dependencies[plugin_name] if resource in deps: for d in deps[resource]: try: importlib.import_module(d) except ImportError: return False return True def init_resource_plugin(plugin_name, plugin_dir): plugin_resources = get_resources(plugin_dir) for resource in plugin_resources: if (resource != '__init__'): # skip unsupported resources if not verify_external_dependencies(plugin_name, resource): LOG.info("Skipping resource %s because of dependencies", resource) continue # load the resource importlib.import_module( "vmware_nsx.shell.admin.plugins." 
"{}.resources.".format(plugin_name) + resource) def get_plugin_dir(plugin_name): plugin_dir = (os.path.dirname(os.path.realpath(__file__)) + "/admin/plugins") return '{}/{}/resources'.format(plugin_dir, plugin_name) vmware-nsx-12.0.1/vmware_nsx/shell/hk_trigger.sh0000666000175100017510000000225413244523345021721 0ustar zuulzuul00000000000000#!/bin/bash # Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # # Trigger execution of NSX plugin's housekeeper # NEUTRON_ENDPOINT=`openstack endpoint list | awk '/network/{print $14}'` if [ -z "$NEUTRON_ENDPOINT" ]; then echo "Couldn't locate Neutron endpoint" exit 1 fi AUTH_TOKEN=`openstack token issue | awk '/ id /{print $4}'` if [ -z "$AUTH_TOKEN" ]; then echo "Couldn't acquire authentication token" exit 1 fi curl -X PUT -s -H "X-Auth-Token: $AUTH_TOKEN" -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"housekeeper": {}}' ${NEUTRON_ENDPOINT}/v2.0/housekeepers/all exit 0 vmware-nsx-12.0.1/vmware_nsx/shell/nsxadmin.py0000666000175100017510000001333413244523345021434 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Purpose of this script is to build a framework which can be leveraged to build utilities to help the on-field ops in system debugging. TODO: Use Cliff https://pypi.python.org/pypi/cliff TODO: Define commands instead of -r -o like get-security-groups, delete-security-groups, nsx neutron nsxv3 can be options TODO: Add support for other resources, ports, logical switches etc. TODO: Autocomplete command line args """ import sys from neutron.common import config as neutron_config from neutron.conf import common as neutron_common_config from neutron_lib.callbacks import registry from oslo_config import cfg from oslo_log import _options from oslo_log import log as logging import requests from vmware_nsx.common import config # noqa from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin import version from vmware_nsx.shell import resources # Suppress the Insecure request warning requests.packages.urllib3.disable_warnings() LOG = logging.getLogger(__name__) def _init_cfg(): # NOTE(gangila): neutron.common.config registers some options by default # which are then shown in the help message. 
We don't need them # so we unregister these options cfg.CONF.unregister_opts(_options.common_cli_opts) cfg.CONF.unregister_opts(_options.logging_cli_opts) cfg.CONF.unregister_opts(neutron_common_config.core_cli_opts) # register must come after above unregister to avoid duplicates cfg.CONF.register_cli_opts(resources.cli_opts) # Init the neutron config neutron_config.init(args=['--config-file', constants.NEUTRON_CONF, '--config-file', constants.NSX_INI]) cfg.CONF(args=sys.argv[1:], project='NSX', prog='Admin Utility', version=version.__version__, usage='nsxadmin -r -o ', default_config_files=[constants.NEUTRON_CONF, constants.NSX_INI]) def _validate_resource_choice(resource, nsx_plugin): if nsx_plugin == 'nsxv' and resource not in resources.nsxv_resources: LOG.error('Supported list of NSX-V resources: %s', resources.nsxv_resources_names) sys.exit(1) elif nsx_plugin == 'nsxv3'and resource not in resources.nsxv3_resources: LOG.error('Supported list of NSX-V3 resources: %s', resources.nsxv3_resources_names) sys.exit(1) elif nsx_plugin == 'nsxtvd'and resource not in resources.nsxtvd_resources: LOG.error('Supported list of NSX-TVD resources: %s', resources.nsxtvd_resources_names) sys.exit(1) def _validate_op_choice(choice, nsx_plugin): if nsx_plugin == 'nsxv': supported_resource_ops = \ resources.nsxv_resources[cfg.CONF.resource].supported_ops if choice not in supported_resource_ops: LOG.error('Supported list of operations for the NSX-V ' 'resource %s', supported_resource_ops) sys.exit(1) elif nsx_plugin == 'nsxv3': supported_resource_ops = \ resources.nsxv3_resources[cfg.CONF.resource].supported_ops if choice not in supported_resource_ops: LOG.error('Supported list of operations for the NSX-V3 ' 'resource %s', supported_resource_ops) sys.exit(1) elif nsx_plugin == 'nsxtvd': supported_resource_ops = \ resources.nsxtvd_resources[cfg.CONF.resource].supported_ops if choice not in supported_resource_ops: LOG.error('Supported list of operations for the NSX-TVD ' 'resource 
%s', supported_resource_ops) sys.exit(1) def _validate_plugin_choice(selected_plugin, nsx_plugin): if nsx_plugin == 'nsxtvd': if selected_plugin: if selected_plugin != 'nsxv' and selected_plugin != 'nsxv3': LOG.error('Illegal plugin %s. please select nsxv or nsxv3', selected_plugin) sys.exit(1) # use nsxv or nsxv3 plugins return selected_plugin else: # use the TVD pluging return nsx_plugin else: if selected_plugin: LOG.error('Cannot select plugin. The current plugin is %s', nsx_plugin) sys.exit(1) return nsx_plugin def main(argv=sys.argv[1:]): _init_cfg() nsx_plugin_in_use = resources.get_plugin() LOG.info('NSX Plugin in use: %s', nsx_plugin_in_use) # the user can select the specific plugin selected_plugin = _validate_plugin_choice(cfg.CONF.plugin, nsx_plugin_in_use) resources.init_resource_plugin( selected_plugin, resources.get_plugin_dir(selected_plugin)) _validate_resource_choice(cfg.CONF.resource, selected_plugin) _validate_op_choice(cfg.CONF.operation, selected_plugin) registry.notify(cfg.CONF.resource, cfg.CONF.operation, 'nsxadmin', force=cfg.CONF.force, property=cfg.CONF.property, verbose=cfg.CONF.verbose) if __name__ == "__main__": sys.exit(main(sys.argv[1:])) vmware-nsx-12.0.1/vmware_nsx/dvs/0000775000175100017510000000000013244524600016713 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/dvs/dvs.py0000666000175100017510000011433413244523345020076 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import exceptions from oslo_log import log as logging from oslo_utils import excutils from oslo_vmware import vim_util from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.dvs import dvs_utils LOG = logging.getLogger(__name__) PORTGROUP_PREFIX = 'dvportgroup' API_FIND_ALL_BY_UUID = 'FindAllByUuid' # QoS related constants QOS_IN_DIRECTION = 'incomingPackets' QOS_AGENT_NAME = 'dvfilter-generic-vmware' DSCP_RULE_DESCRIPTION = 'Openstack Dscp Marking RUle' class SingleDvsManager(object): """Management class for dvs related tasks for the dvs plugin For the globally configured dvs. the moref of the configured DVS will be learnt. This will be used in the operations supported by the manager. """ def __init__(self): self._dvs = DvsManager() self._dvs_moref = self._get_dvs_moref_by_name( self._dvs.get_vc_session(), dvs_utils.dvs_name_get()) def _get_dvs_moref_by_name(self, session, dvs_name): """Get the moref of the configured DVS.""" results = session.invoke_api(vim_util, 'get_objects', session.vim, 'DistributedVirtualSwitch', 100) while results: for dvs in results.objects: for prop in dvs.propSet: if dvs_name == prop.val: vim_util.cancel_retrieval(session.vim, results) return dvs.obj results = vim_util.continue_retrieval(session.vim, results) raise nsx_exc.DvsNotFound(dvs=dvs_name) def add_port_group(self, net_id, vlan_tag=None, trunk_mode=False): return self._dvs.add_port_group(self._dvs_moref, net_id, vlan_tag=vlan_tag, trunk_mode=trunk_mode) def delete_port_group(self, net_id): return self._dvs.delete_port_group(self._dvs_moref, net_id) def get_port_group_info(self, net_id): return self._dvs.get_port_group_info(self._dvs_moref, net_id) def net_id_to_moref(self, net_id): return self._dvs._net_id_to_moref(self._dvs_moref, net_id) class VCManagerBase(object): """Base class for all VC related classes, to initialize the session""" def __init__(self): """Initializer. A global session with the VC will be established. 
NOTE: the DVS port group name will be the Neutron network UUID. """ self._session = dvs_utils.dvs_create_session() def get_vc_session(self): return self._session class DvsManager(VCManagerBase): """Management class for dvs related tasks The dvs-id is not a class member, ince multiple dvs-es can be supported. """ def _get_dvs_moref_by_id(self, dvs_id): return vim_util.get_moref(dvs_id, 'VmwareDistributedVirtualSwitch') def _get_vlan_spec(self, vlan_tag): """Gets portgroup vlan spec.""" # Create the spec for the vlan tag client_factory = self._session.vim.client.factory spec_ns = 'ns0:VmwareDistributedVirtualSwitchVlanIdSpec' vl_spec = client_factory.create(spec_ns) vl_spec.vlanId = vlan_tag vl_spec.inherited = '0' return vl_spec def _get_trunk_vlan_spec(self, start=0, end=4094): """Gets portgroup trunk vlan spec.""" client_factory = self._session.vim.client.factory spec_ns = 'ns0:VmwareDistributedVirtualSwitchTrunkVlanSpec' range = client_factory.create('ns0:NumericRange') range.start = start range.end = end vlan_tag = range vl_spec = client_factory.create(spec_ns) vl_spec.vlanId = vlan_tag vl_spec.inherited = '0' return vl_spec def _get_port_group_spec(self, net_id, vlan_tag, trunk_mode=False, pg_spec=None): """Gets the port groups spec for net_id and vlan_tag.""" client_factory = self._session.vim.client.factory if not pg_spec: pg_spec = client_factory.create('ns0:DVPortgroupConfigSpec') pg_spec.name = net_id pg_spec.type = 'ephemeral' config = client_factory.create('ns0:VMwareDVSPortSetting') if trunk_mode: config.vlan = self._get_trunk_vlan_spec() elif vlan_tag: config.vlan = self._get_vlan_spec(vlan_tag) pg_spec.defaultPortConfig = config return pg_spec def add_port_group(self, dvs_moref, net_id, vlan_tag=None, trunk_mode=False): """Add a new port group to the configured DVS.""" pg_spec = self._get_port_group_spec(net_id, vlan_tag, trunk_mode=trunk_mode) task = self._session.invoke_api(self._session.vim, 'CreateDVPortgroup_Task', dvs_moref, spec=pg_spec) try: # 
NOTE(garyk): cache the returned moref self._session.wait_for_task(task) except Exception: # NOTE(garyk): handle more specific exceptions with excutils.save_and_reraise_exception(): LOG.exception('Failed to create port group for ' '%(net_id)s with tag %(tag)s.', {'net_id': net_id, 'tag': vlan_tag}) LOG.info("%(net_id)s with tag %(vlan_tag)s created on %(dvs)s.", {'net_id': net_id, 'vlan_tag': vlan_tag, 'dvs': dvs_moref.value}) def _net_id_to_moref(self, dvs_moref, net_id): """Gets the moref for the specific neutron network.""" # NOTE(garyk): return this from a cache if not found then invoke # code below. port_groups = self._session.invoke_api(vim_util, 'get_object_properties', self._session.vim, dvs_moref, ['portgroup']) if len(port_groups) and hasattr(port_groups[0], 'propSet'): for prop in port_groups[0].propSet: for val in prop.val[0]: props = self._session.invoke_api(vim_util, 'get_object_properties', self._session.vim, val, ['name']) if len(props) and hasattr(props[0], 'propSet'): for prop in props[0].propSet: # match name or mor id if net_id == prop.val or net_id == val.value: # NOTE(garyk): update cache return val raise exceptions.NetworkNotFound(net_id=net_id) def _is_vlan_network_by_moref(self, moref): """ This can either be a VXLAN or a VLAN network. The type is determined by the prefix of the moref. 
""" return moref.startswith(PORTGROUP_PREFIX) def _copy_port_group_spec(self, orig_spec): client_factory = self._session.vim.client.factory pg_spec = client_factory.create('ns0:DVPortgroupConfigSpec') pg_spec.autoExpand = orig_spec['autoExpand'] pg_spec.configVersion = orig_spec['configVersion'] pg_spec.defaultPortConfig = orig_spec['defaultPortConfig'] pg_spec.name = orig_spec['name'] pg_spec.numPorts = orig_spec['numPorts'] pg_spec.policy = orig_spec['policy'] pg_spec.type = orig_spec['type'] return pg_spec def update_port_group_spec_qos(self, pg_spec, qos_data): port_conf = pg_spec.defaultPortConfig # Update the in bandwidth shaping policy # Note: openstack refers to the directions from the VM point of view, # while the NSX refers to the vswitch point of view. # so open stack egress is actually inShaping here. inPol = port_conf.inShapingPolicy if qos_data.egress.bandwidthEnabled: inPol.inherited = False inPol.enabled.inherited = False inPol.enabled.value = True inPol.averageBandwidth.inherited = False inPol.averageBandwidth.value = qos_data.egress.averageBandwidth inPol.peakBandwidth.inherited = False inPol.peakBandwidth.value = qos_data.egress.peakBandwidth inPol.burstSize.inherited = False inPol.burstSize.value = qos_data.egress.burstSize else: inPol.inherited = True outPol = port_conf.outShapingPolicy if qos_data.ingress.bandwidthEnabled: outPol.inherited = False outPol.enabled.inherited = False outPol.enabled.value = True outPol.averageBandwidth.inherited = False outPol.averageBandwidth.value = qos_data.ingress.averageBandwidth outPol.peakBandwidth.inherited = False outPol.peakBandwidth.value = qos_data.ingress.peakBandwidth outPol.burstSize.inherited = False outPol.burstSize.value = qos_data.ingress.burstSize else: outPol.inherited = True # Update the DSCP marking if (port_conf.filterPolicy.inherited or len(port_conf.filterPolicy.filterConfig) == 0 or len(port_conf.filterPolicy.filterConfig[ 0].trafficRuleset.rules) == 0): if qos_data.dscpMarkEnabled: # 
create the entire structure client_factory = self._session.vim.client.factory filter_rule = client_factory.create('ns0:DvsTrafficRule') filter_rule.description = DSCP_RULE_DESCRIPTION filter_rule.action = client_factory.create( 'ns0:DvsUpdateTagNetworkRuleAction') filter_rule.action.dscpTag = qos_data.dscpMarkValue # mark only incoming packets (openstack egress = nsx ingress) filter_rule.direction = QOS_IN_DIRECTION # Add IP any->any qualifier qualifier = client_factory.create( 'ns0:DvsIpNetworkRuleQualifier') qualifier.protocol = 0 qualifier.sourceAddress = None qualifier.destinationAddress = None filter_rule.qualifier = [qualifier] traffic_filter_config = client_factory.create( 'ns0:DvsTrafficFilterConfig') traffic_filter_config.trafficRuleset.rules = [filter_rule] traffic_filter_config.trafficRuleset.enabled = True traffic_filter_config.agentName = QOS_AGENT_NAME traffic_filter_config.inherited = False port_conf.filterPolicy = client_factory.create( 'ns0:DvsFilterPolicy') port_conf.filterPolicy.filterConfig = [ traffic_filter_config] port_conf.filterPolicy.inherited = False else: # The structure was already initialized filter_policy = port_conf.filterPolicy if qos_data.dscpMarkEnabled: # just update the DSCP value traffic_filter_config = filter_policy.filterConfig[0] filter_rule = traffic_filter_config.trafficRuleset.rules[0] filter_rule.action.dscpTag = qos_data.dscpMarkValue else: # delete the filter policy data filter_policy.filterConfig = [] def _reconfigure_port_group(self, pg_moref, spec_update_calback, spec_update_data): # Get the current configuration of the port group pg_spec = self._session.invoke_api(vim_util, 'get_object_properties', self._session.vim, pg_moref, ['config']) if len(pg_spec) == 0 or len(pg_spec[0].propSet[0]) == 0: LOG.error('Failed to get object properties of %s', pg_moref) raise nsx_exc.DvsNotFound(dvs=pg_moref) # Convert the extracted config to DVPortgroupConfigSpec new_spec = self._copy_port_group_spec(pg_spec[0].propSet[0].val) # 
Update the configuration using the callback & data spec_update_calback(new_spec, spec_update_data) # Update the port group configuration task = self._session.invoke_api(self._session.vim, 'ReconfigureDVPortgroup_Task', pg_moref, spec=new_spec) try: self._session.wait_for_task(task) except Exception: LOG.error('Failed to reconfigure DVPortGroup %s', pg_moref) raise nsx_exc.DvsNotFound(dvs=pg_moref) # Update the dvs port groups config for a vxlan/vlan network # update the spec using a callback and user data def update_port_groups_config(self, dvs_id, net_id, net_moref, spec_update_calback, spec_update_data): is_vlan = self._is_vlan_network_by_moref(net_moref) if is_vlan: return self._update_net_port_groups_config(net_moref, spec_update_calback, spec_update_data) else: dvs_moref = self._get_dvs_moref_by_id(dvs_id) return self._update_vxlan_port_groups_config(dvs_moref, net_id, net_moref, spec_update_calback, spec_update_data) # Update the dvs port groups config for a vxlan network # Searching the port groups for a partial match to the network id & moref # update the spec using a callback and user data def _update_vxlan_port_groups_config(self, dvs_moref, net_id, net_moref, spec_update_calback, spec_update_data): port_groups = self._session.invoke_api(vim_util, 'get_object_properties', self._session.vim, dvs_moref, ['portgroup']) found = False if len(port_groups) and hasattr(port_groups[0], 'propSet'): for prop in port_groups[0].propSet: for pg_moref in prop.val[0]: props = self._session.invoke_api(vim_util, 'get_object_properties', self._session.vim, pg_moref, ['name']) if len(props) and hasattr(props[0], 'propSet'): for prop in props[0].propSet: if net_id in prop.val and net_moref in prop.val: found = True self._reconfigure_port_group( pg_moref, spec_update_calback, spec_update_data) if not found: raise exceptions.NetworkNotFound(net_id=net_id) # Update the dvs port groups config for a vlan network # Finding the port group using the exact moref of the network # 
update the spec using a callback and user data def _update_net_port_groups_config(self, net_moref, spec_update_calback, spec_update_data): pg_moref = vim_util.get_moref(net_moref, "DistributedVirtualPortgroup") self._reconfigure_port_group(pg_moref, spec_update_calback, spec_update_data) def delete_port_group(self, dvs_moref, net_id): """Delete a specific port group.""" moref = self._net_id_to_moref(dvs_moref, net_id) task = self._session.invoke_api(self._session.vim, 'Destroy_Task', moref) try: self._session.wait_for_task(task) except Exception: # NOTE(garyk): handle more specific exceptions with excutils.save_and_reraise_exception(): LOG.exception('Failed to delete port group for %s.', net_id) LOG.info("%(net_id)s delete from %(dvs)s.", {'net_id': net_id, 'dvs': dvs_moref.value}) def get_port_group_info(self, dvs_moref, net_id): """Get portgroup information.""" pg_moref = self._net_id_to_moref(dvs_moref, net_id) # Expand the properties to collect on need basis. properties = ['name'] pg_info = self._session.invoke_api(vim_util, 'get_object_properties_dict', self._session.vim, pg_moref, properties) return pg_info def _get_dvs_moref_from_teaming_data(self, teaming_data): """Get the moref dvs that belongs to the teaming data""" if 'switchObj' in teaming_data: if 'objectId' in teaming_data['switchObj']: dvs_id = teaming_data['switchObj']['objectId'] return vim_util.get_moref( dvs_id, 'VmwareDistributedVirtualSwitch') def update_port_group_spec_teaming(self, pg_spec, teaming_data): mapping = {'FAILOVER_ORDER': 'failover_explicit', 'ETHER_CHANNEL': 'loadbalance_ip', 'LACP_ACTIVE': 'loadbalance_ip', 'LACP_PASSIVE': 'loadbalance_ip', 'LACP_V2': 'loadbalance_ip', 'LOADBALANCE_SRCID': 'loadbalance_srcid', 'LOADBALANCE_SRCMAC': 'loadbalance_srcmac', 'LOADBALANCE_LOADBASED': 'loadbalance_loadbased'} dvs_moref = self._get_dvs_moref_from_teaming_data(teaming_data) port_conf = pg_spec.defaultPortConfig policy = port_conf.uplinkTeamingPolicy policy.inherited = False 
policy.policy.inherited = False policy.policy.value = mapping[teaming_data['teamingPolicy']] policy.uplinkPortOrder.inherited = False ports = teaming_data['failoverUplinkPortNames'] policy.uplinkPortOrder.activeUplinkPort = ports # The standby port will be those not configure as active ones uplinks = self._session.invoke_api(vim_util, "get_object_property", self._session.vim, dvs_moref, "config.uplinkPortPolicy") # VC does not support LAG and normal uplinks. So need to check # if we need to configure standby links if set(ports) & set(uplinks.uplinkPortName): standby = list(set(uplinks.uplinkPortName) - set(ports)) policy.uplinkPortOrder.standbyUplinkPort = standby def update_port_group_spec_name(self, pg_spec, name): pg_spec.name = name def update_port_group_spec_trunk(self, pg_spec, trunk_data): port_conf = pg_spec.defaultPortConfig port_conf.vlan = self._get_trunk_vlan_spec() def update_port_group_security_policy(self, pg_spec, status): policy = pg_spec.policy policy.securityPolicyOverrideAllowed = status def _update_port_security_policy(self, dvs_moref, port, status): client_factory = self._session.vim.client.factory ps = client_factory.create('ns0:DVPortConfigSpec') ps.key = port.portKey ps.operation = 'edit' policy = client_factory.create('ns0:DVSSecurityPolicy') bp = client_factory.create('ns0:BoolPolicy') bp.inherited = False bp.value = status policy.allowPromiscuous = bp policy.forgedTransmits = bp policy.inherited = False setting = client_factory.create('ns0:VMwareDVSPortSetting') setting.securityPolicy = policy ps.setting = setting task = self._session.invoke_api(self._session.vim, 'ReconfigureDVPort_Task', dvs_moref, port=ps) try: self._session.wait_for_task(task) LOG.info("Updated port security status") except Exception as e: LOG.error("Failed to update port %s. Reason: %s", port.key, e) class VMManager(VCManagerBase): """Management class for VMs related VC tasks.""" def get_vm_moref_obj(self, instance_uuid): """Get reference to the VM. 
The method will make use of FindAllByUuid to get the VM reference. This method finds all VM's on the backend that match the instance_uuid, more specifically all VM's on the backend that have 'config_spec.instanceUuid' set to 'instance_uuid'. """ vm_refs = self._session.invoke_api( self._session.vim, API_FIND_ALL_BY_UUID, self._session.vim.service_content.searchIndex, uuid=instance_uuid, vmSearch=True, instanceUuid=True) if vm_refs: return vm_refs[0] def get_vm_moref(self, instance_uuid): """Get reference to the VM. """ vm_ref = self.get_vm_moref_obj(instance_uuid) if vm_ref: return vm_ref.value def get_vm_spec(self, vm_moref): vm_specs = self._session.invoke_api(vim_util, 'get_object_properties', self._session.vim, vm_moref, ['network']) if vm_specs: return vm_specs[0] def _build_vm_spec_attach(self, neutron_port_id, port_mac, nsx_net_id, device_type): # Code inspired by nova: _create_vif_spec client_factory = self._session.vim.client.factory vm_spec = client_factory.create('ns0:VirtualMachineConfigSpec') device_change = client_factory.create('ns0:VirtualDeviceConfigSpec') device_change.operation = "add" net_device = client_factory.create('ns0:' + device_type) net_device.key = -47 net_device.addressType = "manual" # configure the neutron port id and mac net_device.externalId = neutron_port_id net_device.macAddress = port_mac net_device.wakeOnLanEnabled = True backing = client_factory.create( 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo') # configure the NSX network Id backing.opaqueNetworkId = nsx_net_id backing.opaqueNetworkType = "nsx.LogicalSwitch" net_device.backing = backing connectable_spec = client_factory.create( 'ns0:VirtualDeviceConnectInfo') connectable_spec.startConnected = True connectable_spec.allowGuestControl = True connectable_spec.connected = True net_device.connectable = connectable_spec device_change.device = net_device vm_spec.deviceChange = [device_change] return vm_spec def attach_vm_interface(self, vm_moref, neutron_port_id, port_mac, 
nsx_net_id, device_type): new_spec = self._build_vm_spec_attach( neutron_port_id, port_mac, nsx_net_id, device_type) task = self._session.invoke_api(self._session.vim, 'ReconfigVM_Task', vm_moref, spec=new_spec) try: self._session.wait_for_task(task) LOG.info("Updated VM moref %(moref)s spec - " "attached an interface", {'moref': vm_moref.value}) except Exception as e: LOG.error("Failed to reconfigure VM %(moref)s spec: %(e)s", {'moref': vm_moref.value, 'e': e}) def _build_vm_spec_detach(self, device): """Builds the vif detach config spec.""" # Code inspired by nova: get_network_detach_config_spec client_factory = self._session.vim.client.factory config_spec = client_factory.create('ns0:VirtualMachineConfigSpec') virtual_device_config = client_factory.create( 'ns0:VirtualDeviceConfigSpec') virtual_device_config.operation = "remove" virtual_device_config.device = device config_spec.deviceChange = [virtual_device_config] return config_spec def detach_vm_interface(self, vm_moref, device): new_spec = self._build_vm_spec_detach(device) task = self._session.invoke_api(self._session.vim, 'ReconfigVM_Task', vm_moref, spec=new_spec) try: self._session.wait_for_task(task) LOG.info("Updated VM %(moref)s spec - detached an interface", {'moref': vm_moref.value}) except Exception as e: LOG.error("Failed to reconfigure vm moref %(moref)s: %(e)s", {'moref': vm_moref.value, 'e': e}) def get_vm_interfaces_info(self, vm_moref): hardware_devices = self._session.invoke_api(vim_util, "get_object_property", self._session.vim, vm_moref, "config.hardware.device") return hardware_devices def _get_device_port(self, device_id, mac_address): vm_moref = self.get_vm_moref_obj(device_id) hardware_devices = self.get_vm_interfaces_info(vm_moref) if not hardware_devices: return if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": hardware_devices = hardware_devices.VirtualDevice for device in hardware_devices: if hasattr(device, 'macAddress'): if device.macAddress == mac_address: return 
device.backing.port def update_port_security_policy(self, dvs_id, net_id, net_moref, device_id, mac_address, status): dvs_moref = self._get_dvs_moref_by_id(dvs_id) port = self._get_device_port(device_id, mac_address) if port: self._update_port_security_policy(dvs_moref, port, status) class ClusterManager(VCManagerBase): """Management class for Cluster related VC tasks.""" def _reconfigure_cluster(self, session, cluster, config_spec): """Reconfigure a cluster in vcenter""" try: reconfig_task = session.invoke_api( session.vim, "ReconfigureComputeResource_Task", cluster, spec=config_spec, modify=True) session.wait_for_task(reconfig_task) except Exception as excep: LOG.exception('Failed to reconfigure cluster %s', excep) def _create_vm_group_spec(self, client_factory, name, vm_refs, group=None): if group is None: group = client_factory.create('ns0:ClusterVmGroup') group.name = name operation = 'add' else: operation = 'edit' # On vCenter UI, it is not possible to create VM group without # VMs attached to it. But, using APIs, it is possible to create # VM group without VMs attached. 
Therefore, check for existence # of vm attribute in the group to avoid exceptions if hasattr(group, 'vm'): group.vm += vm_refs else: group.vm = vm_refs group_spec = client_factory.create('ns0:ClusterGroupSpec') group_spec.operation = operation group_spec.info = group return [group_spec] def _create_cluster_rules_spec(self, client_factory, name, vm_group_name, host_group_name): rules_spec = client_factory.create('ns0:ClusterRuleSpec') rules_spec.operation = 'add' policy_class = 'ns0:ClusterVmHostRuleInfo' rules_info = client_factory.create(policy_class) rules_info.enabled = True rules_info.mandatory = False rules_info.name = name rules_info.vmGroupName = vm_group_name rules_info.affineHostGroupName = host_group_name rules_spec.info = rules_info return rules_spec def get_configured_vms(self, resource_id, n_host_groups=2): session = self._session resource = vim_util.get_moref(resource_id, 'ResourcePool') # TODO(garyk): cache the cluster details cluster = session.invoke_api( vim_util, "get_object_property", self._session.vim, resource, "owner") cluster_config = session.invoke_api( vim_util, "get_object_property", self._session.vim, cluster, "configurationEx") configured_vms = [] for index in range(n_host_groups): vm_group = None entry_id = index + 1 groups = [] if hasattr(cluster_config, 'group'): groups = cluster_config.group for group in groups: if 'neutron-group-%s' % entry_id == group.name: vm_group = group break if vm_group and hasattr(vm_group, 'vm'): for vm in vm_group.vm: configured_vms.append(vm.value) return configured_vms def update_cluster_edge_failover(self, resource_id, vm_moids, host_group_names): """Updates cluster for vm placement using DRS""" session = self._session resource = vim_util.get_moref(resource_id, 'ResourcePool') # TODO(garyk): cache the cluster details cluster = session.invoke_api( vim_util, "get_object_property", self._session.vim, resource, "owner") cluster_config = session.invoke_api( vim_util, "get_object_property", self._session.vim, 
cluster, "configurationEx") vms = [vim_util.get_moref(vm_moid, 'VirtualMachine') if vm_moid else None for vm_moid in vm_moids] client_factory = session.vim.client.factory config_spec = client_factory.create('ns0:ClusterConfigSpecEx') num_host_groups = len(host_group_names) rules = [] if hasattr(cluster_config, 'rule'): rules = cluster_config.rule for index, vm in enumerate(vms, start=1): if not vm: continue vmGroup = None groups = [] if hasattr(cluster_config, 'group'): groups = cluster_config.group for group in groups: if 'neutron-group-%s' % index == group.name: vmGroup = group break # Create/update the VM group groupSpec = self._create_vm_group_spec( client_factory, 'neutron-group-%s' % index, [vm], vmGroup) config_spec.groupSpec.append(groupSpec) config_rule = None # Create the config rule if it does not exist for rule in rules: if 'neutron-rule-%s' % index == rule.name: config_rule = rule break if config_rule is None and index <= num_host_groups: ruleSpec = self._create_cluster_rules_spec( client_factory, 'neutron-rule-%s' % index, 'neutron-group-%s' % index, host_group_names[index - 1]) config_spec.rulesSpec.append(ruleSpec) self._reconfigure_cluster(session, cluster, config_spec) def validate_host_groups(self, resource_id, host_group_names): session = self._session resource = vim_util.get_moref(resource_id, 'ResourcePool') cluster = session.invoke_api( vim_util, "get_object_property", self._session.vim, resource, "owner") client_factory = session.vim.client.factory config_spec = client_factory.create('ns0:ClusterConfigSpecEx') cluster_config = session.invoke_api( vim_util, "get_object_property", self._session.vim, cluster, "configurationEx") groups = [] if hasattr(cluster_config, 'group'): groups = cluster_config.group for host_group_name in host_group_names: found = False for group in groups: if host_group_name == group.name: found = True break if not found: LOG.error("%s does not exist", host_group_name) raise exceptions.NotFound() update_cluster = False 
num_host_groups = len(host_group_names) rules = [] if hasattr(cluster_config, 'rule'): rules = cluster_config.rule # Ensure that the VM groups are created for index in range(num_host_groups): entry_id = index + 1 vmGroup = None for group in groups: if 'neutron-group-%s' % entry_id == group.name: vmGroup = group break if vmGroup is None: groupSpec = self._create_vm_group_spec( client_factory, 'neutron-group-%s' % entry_id, [], vmGroup) config_spec.groupSpec.append(groupSpec) update_cluster = True config_rule = None # Create the config rule if it does not exist for rule in rules: if 'neutron-rule-%s' % entry_id == rule.name: config_rule = rule break if config_rule is None and index < num_host_groups: ruleSpec = self._create_cluster_rules_spec( client_factory, 'neutron-rule-%s' % entry_id, 'neutron-group-%s' % entry_id, host_group_names[index - 1]) config_spec.rulesSpec.append(ruleSpec) update_cluster = True if update_cluster: try: self._reconfigure_cluster(session, cluster, config_spec) except Exception as e: LOG.error('Unable to update cluster for host groups %s', e) def _delete_vm_group_spec(self, client_factory, name): group_spec = client_factory.create('ns0:ClusterGroupSpec') group = client_factory.create('ns0:ClusterVmGroup') group.name = name group_spec.operation = 'remove' group_spec.removeKey = name group_spec.info = group return [group_spec] def _delete_cluster_rules_spec(self, client_factory, rule): rules_spec = client_factory.create('ns0:ClusterRuleSpec') rules_spec.operation = 'remove' rules_spec.removeKey = int(rule.key) policy_class = 'ns0:ClusterVmHostRuleInfo' rules_info = client_factory.create(policy_class) rules_info.name = rule.name rules_info.vmGroupName = rule.vmGroupName rules_info.affineHostGroupName = rule.affineHostGroupName rules_spec.info = rules_info return rules_spec def cluster_host_group_cleanup(self, resource_id, n_host_groups=2): session = self._session resource = vim_util.get_moref(resource_id, 'ResourcePool') # TODO(garyk): cache 
the cluster details cluster = session.invoke_api( vim_util, "get_object_property", self._session.vim, resource, "owner") client_factory = session.vim.client.factory config_spec = client_factory.create('ns0:ClusterConfigSpecEx') cluster_config = session.invoke_api( vim_util, "get_object_property", self._session.vim, cluster, "configurationEx") groups = [] if hasattr(cluster_config, 'group'): groups = cluster_config.group rules = [] if hasattr(cluster_config, 'rule'): rules = cluster_config.rule groupSpec = [] ruleSpec = [] for index in range(n_host_groups): entry_id = index + 1 for group in groups: if 'neutron-group-%s' % entry_id == group.name: groupSpec.append(self._delete_vm_group_spec( client_factory, group.name)) # Delete the config rule if it exists for rule in rules: if 'neutron-rule-%s' % entry_id == rule.name: ruleSpec.append(self._delete_cluster_rules_spec( client_factory, rule)) if groupSpec: config_spec.groupSpec = groupSpec if ruleSpec: config_spec.rulesSpec = ruleSpec if groupSpec or ruleSpec: self._reconfigure_cluster(session, cluster, config_spec) class VCManager(DvsManager, VMManager, ClusterManager): """Management class for all vc related tasks.""" pass vmware-nsx-12.0.1/vmware_nsx/dvs/__init__.py0000666000175100017510000000000013244523345021021 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/dvs/dvs_utils.py0000666000175100017510000000667013244523345021321 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_vmware import api from oslo_vmware import exceptions as oslo_vmware_exc from vmware_nsx._i18n import _ dvs_opts = [ cfg.StrOpt('host_ip', help='Hostname or IP address for connection to VMware vCenter ' 'host.'), cfg.PortOpt('host_port', default=443, help='Port for connection to VMware vCenter host.'), cfg.StrOpt('host_username', help='Username for connection to VMware vCenter host.'), cfg.StrOpt('host_password', help='Password for connection to VMware vCenter host.', secret=True), cfg.FloatOpt('task_poll_interval', default=0.5, help='The interval used for polling of remote tasks.'), cfg.StrOpt('ca_file', help='Specify a CA bundle file to use in verifying the ' 'vCenter server certificate.'), cfg.BoolOpt('insecure', default=False, help='If true, the vCenter server certificate is not ' 'verified. If false, then the default CA truststore is ' 'used for verification. This option is ignored if ' '"ca_file" is set.'), cfg.IntOpt('api_retry_count', default=10, help='The number of times we retry on failures, e.g., ' 'socket error, etc.'), cfg.StrOpt('dvs_name', help='The name of the preconfigured DVS.'), cfg.StrOpt('metadata_mode', help=_("This value should not be set. 
It is just required for " "ensuring that the DVS plugin works with the generic " "NSX metadata code")), ] CONF = cfg.CONF CONF.register_opts(dvs_opts, 'dvs') # Create and register exceptions not in oslo.vmware class DvsOperationBulkFault(oslo_vmware_exc.VimException): msg_fmt = _("Cannot complete a DVS operation for one or more members.") def dvs_register_exceptions(): oslo_vmware_exc.register_fault_class('DvsOperationBulkFault', DvsOperationBulkFault) def dvs_is_enabled(dvs_id=None): """Returns the configured DVS status.""" return bool(CONF.dvs.host_ip and CONF.dvs.host_username and CONF.dvs.host_password and (dvs_id or CONF.dvs.dvs_name)) def dvs_create_session(): return api.VMwareAPISession(CONF.dvs.host_ip, CONF.dvs.host_username, CONF.dvs.host_password, CONF.dvs.api_retry_count, CONF.dvs.task_poll_interval, port=CONF.dvs.host_port, cacert=CONF.dvs.ca_file, insecure=CONF.dvs.insecure) def dvs_name_get(): return CONF.dvs.dvs_name vmware-nsx-12.0.1/vmware_nsx/extension_drivers/0000775000175100017510000000000013244524600021671 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/extension_drivers/dns_integration.py0000666000175100017510000005113713244523345025450 0ustar zuulzuul00000000000000# Copyright (c) 2018 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api.definitions import dns from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context as n_context from neutron_lib.exceptions import dns as dns_exc from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import log as logging from neutron.objects import network as net_obj from neutron.objects import ports as port_obj from neutron.services.externaldns import driver from vmware_nsx.common import driver_api from vmware_nsx.plugins.nsx_v3 import availability_zones as nsx_az LOG = logging.getLogger(__name__) DNS_DOMAIN_DEFAULT = 'openstacklocal.' def _dotted_domain(dns_domain): if dns_domain.endswith('.'): return dns_domain return '%s.' % dns_domain # TODO(asarfaty) use dns-domain/nameserver from network az instead of global class DNSExtensionDriver(driver_api.ExtensionDriver): _supported_extension_alias = 'dns-integration' @property def extension_alias(self): return self._supported_extension_alias def process_create_network(self, plugin_context, request_data, db_data): dns_domain = request_data.get(dns.DNSDOMAIN) if not validators.is_attr_set(dns_domain): return if dns_domain: net_obj.NetworkDNSDomain(plugin_context, network_id=db_data['id'], dns_domain=dns_domain).create() db_data[dns.DNSDOMAIN] = dns_domain def process_update_network(self, plugin_context, request_data, db_data): new_value = request_data.get(dns.DNSDOMAIN) if not validators.is_attr_set(new_value): return current_dns_domain = db_data.get(dns.DNSDOMAIN) if current_dns_domain == new_value: return net_id = db_data['id'] if current_dns_domain: net_dns_domain = net_obj.NetworkDNSDomain.get_object( plugin_context, network_id=net_id) if new_value: net_dns_domain['dns_domain'] = new_value db_data[dns.DNSDOMAIN] = new_value net_dns_domain.update() else: 
net_dns_domain.delete() db_data[dns.DNSDOMAIN] = '' elif new_value: net_obj.NetworkDNSDomain(plugin_context, network_id=net_id, dns_domain=new_value).create() db_data[dns.DNSDOMAIN] = new_value def process_create_port(self, plugin_context, request_data, db_data): if not (request_data.get(dns.DNSNAME) or request_data.get(dns.DNSDOMAIN)): return dns_name, is_dns_domain_default = self._get_request_dns_name( request_data, db_data['network_id'], plugin_context) if is_dns_domain_default: return network = self._get_network(plugin_context, db_data['network_id']) self._create_port_dns_record(plugin_context, request_data, db_data, network, dns_name) def _create_port_dns_record(self, plugin_context, request_data, db_data, network, dns_name): external_dns_domain = (request_data.get(dns.DNSDOMAIN) or network.get(dns.DNSDOMAIN)) current_dns_name, current_dns_domain = ( self._calculate_current_dns_name_and_domain( dns_name, external_dns_domain, self.external_dns_not_needed(plugin_context, network))) dns_data_obj = port_obj.PortDNS( plugin_context, port_id=db_data['id'], current_dns_name=current_dns_name, current_dns_domain=current_dns_domain, previous_dns_name='', previous_dns_domain='', dns_name=dns_name, dns_domain=request_data.get(dns.DNSDOMAIN, '')) dns_data_obj.create() return dns_data_obj def _calculate_current_dns_name_and_domain(self, dns_name, external_dns_domain, no_external_dns_service): # When creating a new PortDNS object, the current_dns_name and # current_dns_domain fields hold the data that the integration driver # will send to the external DNS service. 
They are set to non-blank # values only if all the following conditions are met: # 1) There is an external DNS integration driver configured # 2) The user request contains a valid non-blank value for the port's # dns_name # 3) The user request contains a valid non-blank value for the port's # dns_domain or the port's network has a non-blank value in its # dns_domain attribute are_both_dns_attributes_set = dns_name and external_dns_domain if no_external_dns_service or not are_both_dns_attributes_set: return '', '' return dns_name, external_dns_domain def _update_dns_db(self, dns_name, dns_domain, db_data, plugin_context, has_fixed_ips): dns_data_db = port_obj.PortDNS.get_object( plugin_context, port_id=db_data['id']) if dns_data_db: is_dns_name_changed = (dns_name is not None and dns_data_db['current_dns_name'] != dns_name) if is_dns_name_changed or (has_fixed_ips and dns_data_db['current_dns_name']): dns_data_db['previous_dns_name'] = ( dns_data_db['current_dns_name']) dns_data_db['previous_dns_domain'] = ( dns_data_db['current_dns_domain']) if is_dns_name_changed: dns_data_db[dns.DNSNAME] = dns_name dns_data_db['current_dns_name'] = dns_name if dns_name: dns_data_db['current_dns_domain'] = dns_domain else: dns_data_db['current_dns_domain'] = '' dns_data_db.update() return dns_data_db if dns_name: dns_data_db = port_obj.PortDNS(plugin_context, port_id=db_data['id'], current_dns_name=dns_name, current_dns_domain=dns_domain, previous_dns_name='', previous_dns_domain='', dns_name=dns_name) dns_data_db.create() return dns_data_db def process_update_port(self, plugin_context, request_data, db_data): dns_name = request_data.get(dns.DNSNAME) has_fixed_ips = 'fixed_ips' in request_data if dns_name is None and not has_fixed_ips: return if dns_name is not None: dns_name, is_dns_domain_default = self._get_request_dns_name( request_data, db_data['network_id'], plugin_context) if is_dns_domain_default: self._extend_port_dict(db_data, db_data, None, plugin_context) return 
network = self._get_network(plugin_context, db_data['network_id']) dns_domain = network[dns.DNSDOMAIN] dns_data_db = None if not dns_domain or self.external_dns_not_needed(plugin_context, network): # No need to update external DNS service. Only process the port's # dns_name attribute if necessary if dns_name is not None: dns_data_db = self._process_only_dns_name_update( plugin_context, db_data, dns_name) else: dns_data_db = self._update_dns_db(dns_name, dns_domain, db_data, plugin_context, has_fixed_ips) self._extend_port_dict(db_data, db_data, dns_data_db, plugin_context) def _process_only_dns_name_update(self, plugin_context, db_data, dns_name): dns_data_db = port_obj.PortDNS.get_object( plugin_context, port_id=db_data['id']) if dns_data_db: dns_data_db['dns_name'] = dns_name dns_data_db.update() return dns_data_db if dns_name: dns_data_db = port_obj.PortDNS(plugin_context, port_id=db_data['id'], current_dns_name='', current_dns_domain='', previous_dns_name='', previous_dns_domain='', dns_name=dns_name) dns_data_db.create() return dns_data_db def external_dns_not_needed(self, context, network): """Decide if ports in network need to be sent to the DNS service. 
:param context: plugin request context :param network: network dictionary :return: True or False """ pass def extend_network_dict(self, session, db_data, response_data): response_data[dns.DNSDOMAIN] = '' if db_data.dns_domain: response_data[dns.DNSDOMAIN] = db_data.dns_domain[dns.DNSDOMAIN] return response_data def _get_dns_domain(self, network_id, context=None): if not cfg.CONF.dns_domain: return '' return _dotted_domain(cfg.CONF.dns_domain) def _get_request_dns_name(self, port, network_id, context): dns_domain = self._get_dns_domain(network_id, context) if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)): return (port.get(dns.DNSNAME, ''), False) return ('', True) def _get_request_dns_name_and_domain_name(self, dns_data_db, network_id, context): dns_domain = self._get_dns_domain(network_id, context) dns_name = '' if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)): if dns_data_db: dns_name = dns_data_db.dns_name return dns_name, dns_domain def _get_dns_names_for_port(self, ips, dns_data_db, network_id, context): dns_assignment = [] dns_name, dns_domain = self._get_request_dns_name_and_domain_name( dns_data_db, network_id, context) for ip in ips: if dns_name: hostname = dns_name fqdn = dns_name if not dns_name.endswith('.'): fqdn = '%s.%s' % (dns_name, dns_domain) else: hostname = 'host-%s' % ip['ip_address'].replace( '.', '-').replace(':', '-') fqdn = hostname if dns_domain: fqdn = '%s.%s' % (hostname, dns_domain) dns_assignment.append({'ip_address': ip['ip_address'], 'hostname': hostname, 'fqdn': fqdn}) return dns_assignment def _get_dns_name_for_port_get(self, port, dns_data_db, context): if port['fixed_ips']: return self._get_dns_names_for_port( port['fixed_ips'], dns_data_db, port['network_id'], context) return [] def _extend_port_dict(self, db_data, response_data, dns_data_db, context=None): if not dns_data_db: response_data[dns.DNSNAME] = '' else: response_data[dns.DNSNAME] = dns_data_db[dns.DNSNAME] response_data['dns_assignment'] = 
self._get_dns_name_for_port_get( db_data, dns_data_db, context) return response_data def extend_port_dict(self, session, db_data, response_data): dns_data_db = db_data.dns return self._extend_port_dict(db_data, response_data, dns_data_db) def _get_network(self, context, network_id): plugin = directory.get_plugin() return plugin.get_network(context, network_id) class DNSExtensionDriverNSXv(DNSExtensionDriver): def initialize(self): LOG.info("DNSExtensionDriverNSXv initialization complete") def external_dns_not_needed(self, context, network): dns_driver = _get_dns_driver() if not dns_driver: return True provider_type = network.get('provider:network_type') if not provider_type: return True if network['router:external']: return True return False class DNSExtensionDriverNSXv3(DNSExtensionDriver): def initialize(self): self._availability_zones = nsx_az.NsxV3AvailabilityZones() LOG.info("DNSExtensionDriverNSXv3 initialization complete") def _get_network_az(self, network_id, context): if not context: context = n_context.get_admin_context() network = self._get_network(context, network_id) if az_def.AZ_HINTS in network and network[az_def.AZ_HINTS]: az_name = network[az_def.AZ_HINTS][0] return self._availability_zones.get_availability_zone(az_name) return self._availability_zones.get_default_availability_zone() def _get_dns_domain(self, network_id, context=None): # try to get the dns-domain from the specific availability zone # of this network az = self._get_network_az(network_id, context) if (az.dns_domain and _dotted_domain(az.dns_domain) != _dotted_domain(DNS_DOMAIN_DEFAULT)): dns_domain = az.dns_domain elif (cfg.CONF.nsx_v3.dns_domain and (_dotted_domain(cfg.CONF.nsx_v3.dns_domain) != _dotted_domain(DNS_DOMAIN_DEFAULT))): dns_domain = cfg.CONF.nsx_v3.dns_domain elif cfg.CONF.dns_domain: dns_domain = cfg.CONF.dns_domain else: return '' return _dotted_domain(dns_domain) def external_dns_not_needed(self, context, network): dns_driver = _get_dns_driver() if not dns_driver: 
return True provider_type = network.get('provider:network_type') if not provider_type: return True if network['router:external']: return True return False class DNSExtensionDriverDVS(DNSExtensionDriver): def initialize(self): LOG.info("DNSExtensionDriverDVS initialization complete") def external_dns_not_needed(self, context, network): dns_driver = _get_dns_driver() if not dns_driver: return True if network['router:external']: return True return False DNS_DRIVER = None def _get_dns_driver(): global DNS_DRIVER if DNS_DRIVER: return DNS_DRIVER if not cfg.CONF.external_dns_driver: return try: DNS_DRIVER = driver.ExternalDNSService.get_instance() LOG.debug("External DNS driver loaded: %s", cfg.CONF.external_dns_driver) return DNS_DRIVER except ImportError: LOG.exception("ImportError exception occurred while loading " "the external DNS service driver") raise dns_exc.ExternalDNSDriverNotFound( driver=cfg.CONF.external_dns_driver) def _send_data_to_external_dns_service(context, dns_driver, dns_domain, dns_name, records): try: dns_driver.create_record_set(context, dns_domain, dns_name, records) except (dns_exc.DNSDomainNotFound, dns_exc.DuplicateRecordSet) as e: LOG.exception("Error publishing port data in external DNS " "service. Name: '%(name)s'. Domain: '%(domain)s'. " "DNS service driver message '%(message)s'", {"name": dns_name, "domain": dns_domain, "message": e.msg}) def _remove_data_from_external_dns_service(context, dns_driver, dns_domain, dns_name, records): try: dns_driver.delete_record_set(context, dns_domain, dns_name, records) except (dns_exc.DNSDomainNotFound, dns_exc.DuplicateRecordSet) as e: LOG.exception("Error deleting port data from external DNS " "service. Name: '%(name)s'. Domain: '%(domain)s'. " "IP addresses '%(ips)s'. 
DNS service driver message " "'%(message)s'", {"name": dns_name, "domain": dns_domain, "message": e.msg, "ips": ', '.join(records)}) def _create_port_in_external_dns_service(resource, event, trigger, **kwargs): dns_driver = _get_dns_driver() if not dns_driver: return context = kwargs['context'] port = kwargs['port'] dns_data_db = port_obj.PortDNS.get_object( context, port_id=port['id']) if not (dns_data_db and dns_data_db['current_dns_name']): return records = [ip['ip_address'] for ip in port['fixed_ips']] _send_data_to_external_dns_service(context, dns_driver, dns_data_db['current_dns_domain'], dns_data_db['current_dns_name'], records) def _update_port_in_external_dns_service(resource, event, trigger, **kwargs): dns_driver = _get_dns_driver() if not dns_driver: return context = kwargs['context'] updated_port = kwargs['port'] original_port = kwargs.get('original_port') if not original_port: return original_ips = [ip['ip_address'] for ip in original_port['fixed_ips']] updated_ips = [ip['ip_address'] for ip in updated_port['fixed_ips']] is_dns_name_changed = (updated_port[dns.DNSNAME] != original_port[dns.DNSNAME]) is_dns_domain_changed = (dns.DNSDOMAIN in updated_port and updated_port[dns.DNSDOMAIN] != original_port[dns.DNSDOMAIN]) ips_changed = set(original_ips) != set(updated_ips) if not any((is_dns_name_changed, is_dns_domain_changed, ips_changed)): return dns_data_db = port_obj.PortDNS.get_object( context, port_id=updated_port['id']) if not (dns_data_db and (dns_data_db['previous_dns_name'] or dns_data_db['current_dns_name'])): return if dns_data_db['previous_dns_name']: _remove_data_from_external_dns_service( context, dns_driver, dns_data_db['previous_dns_domain'], dns_data_db['previous_dns_name'], original_ips) if dns_data_db['current_dns_name']: _send_data_to_external_dns_service(context, dns_driver, dns_data_db['current_dns_domain'], dns_data_db['current_dns_name'], updated_ips) def _delete_port_in_external_dns_service(resource, event, trigger, **kwargs): 
dns_driver = _get_dns_driver() if not dns_driver: return context = kwargs['context'] port_id = kwargs['port_id'] dns_data_db = port_obj.PortDNS.get_object( context, port_id=port_id) if not dns_data_db: return if dns_data_db['current_dns_name']: ip_allocations = port_obj.IPAllocation.get_objects(context, port_id=port_id) records = [str(alloc['ip_address']) for alloc in ip_allocations] _remove_data_from_external_dns_service( context, dns_driver, dns_data_db['current_dns_domain'], dns_data_db['current_dns_name'], records) registry.subscribe( _create_port_in_external_dns_service, resources.PORT, events.AFTER_CREATE) registry.subscribe( _update_port_in_external_dns_service, resources.PORT, events.AFTER_UPDATE) registry.subscribe( _delete_port_in_external_dns_service, resources.PORT, events.BEFORE_DELETE) vmware-nsx-12.0.1/vmware_nsx/extension_drivers/__init__.py0000666000175100017510000000117413244523345024014 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os NSX_EXT_PATH = os.path.join(os.path.dirname(__file__), 'extensions') vmware-nsx-12.0.1/vmware_nsx/opts.py0000666000175100017510000000303413244523345017465 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import vmware_nsx.common.config import vmware_nsx.dhcp_meta.lsnmanager import vmware_nsx.dhcp_meta.nsx import vmware_nsx.dvs.dvs_utils import vmware_nsx.extensions.networkgw def list_opts(): return [('DEFAULT', itertools.chain( vmware_nsx.common.config.cluster_opts, vmware_nsx.common.config.connection_opts, vmware_nsx.common.config.nsx_common_opts)), ('NSX', vmware_nsx.common.config.base_opts), ('NSX_SYNC', vmware_nsx.common.config.sync_opts), ('nsxv', vmware_nsx.common.config.nsxv_opts), ('nsx_v3', vmware_nsx.common.config.nsx_v3_opts), ('QUOTAS', vmware_nsx.extensions.networkgw.nw_gw_quota_opts), ('dvs', vmware_nsx.dvs.dvs_utils.dvs_opts), ('NSX_DHCP', vmware_nsx.dhcp_meta.nsx.dhcp_opts), ('NSX_METADATA', vmware_nsx.dhcp_meta.nsx.metadata_opts), ('NSX_LSN', vmware_nsx.dhcp_meta.lsnmanager.lsn_opts)] vmware-nsx-12.0.1/vmware_nsx/db/0000775000175100017510000000000013244524600016504 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/nsxv_db.py0000666000175100017510000010552713244523345020542 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import neutron.db.api as db import decorator from neutron_lib.api.definitions import portbindings as pbin from neutron_lib import constants as lib_const from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils import six from sqlalchemy import func from sqlalchemy.orm import exc from sqlalchemy.sql import expression as expr from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import nsxv_constants from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsxv_models from vmware_nsx.extensions import dhcp_mtu as ext_dhcp_mtu from vmware_nsx.extensions import dns_search_domain as ext_dns_search_domain from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import constants NsxvEdgeDhcpStaticBinding = nsxv_models.NsxvEdgeDhcpStaticBinding LOG = logging.getLogger(__name__) def add_nsxv_router_binding(session, router_id, vse_id, lswitch_id, status, appliance_size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): with session.begin(subtransactions=True): binding = nsxv_models.NsxvRouterBinding( router_id=router_id, edge_id=vse_id, lswitch_id=lswitch_id, status=status, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone) session.add(binding) return binding @decorator.decorator def warn_on_binding_status_error(f, *args, **kwargs): result = f(*args, **kwargs) if result is None: return # we support functions that return a single entry or a list if isinstance(result, list): bindings = result else: bindings = [result] for binding in bindings: if binding and binding['status'] == lib_const.ERROR: LOG.warning("Found NSXV router binding entry with status " "%(status)s: router %(router)s, " "edge %(edge)s, lswitch %(lswitch)s, " "status 
description: %(desc)s ", {'status': binding['status'], 'router': binding['router_id'], 'edge': binding['edge_id'], 'lswitch': binding['lswitch_id'], 'desc': binding['status_description']}) return result @warn_on_binding_status_error def get_nsxv_router_binding(session, router_id): return session.query(nsxv_models.NsxvRouterBinding).filter_by( router_id=router_id).first() @warn_on_binding_status_error def get_nsxv_router_binding_by_edge(session, edge_id): return session.query(nsxv_models.NsxvRouterBinding).filter_by( edge_id=edge_id).first() @warn_on_binding_status_error def get_nsxv_router_bindings_by_edge(session, edge_id): return session.query(nsxv_models.NsxvRouterBinding).filter_by( edge_id=edge_id).all() @warn_on_binding_status_error def get_nsxv_router_bindings(session, filters=None, like_filters=None): session = db.get_reader_session() query = session.query(nsxv_models.NsxvRouterBinding) return nsx_db._apply_filters_to_query(query, nsxv_models.NsxvRouterBinding, filters, like_filters).all() def update_nsxv_router_binding(session, router_id, **kwargs): with session.begin(subtransactions=True): binding = (session.query(nsxv_models.NsxvRouterBinding). filter_by(router_id=router_id).one()) for key, value in six.iteritems(kwargs): binding[key] = value return binding def delete_nsxv_router_binding(session, router_id): with session.begin(subtransactions=True): binding = (session.query(nsxv_models.NsxvRouterBinding). filter_by(router_id=router_id).first()) if binding: session.delete(binding) def get_edge_availability_zone(session, edge_id): binding = get_nsxv_router_binding_by_edge(session, edge_id) if binding: return binding['availability_zone'] def get_router_availability_zone(session, router_id): binding = get_nsxv_router_binding(session, router_id) if binding: return binding['availability_zone'] def clean_edge_router_binding(session, edge_id): with session.begin(subtransactions=True): (session.query(nsxv_models.NsxvRouterBinding). 
filter_by(edge_id=edge_id).delete()) def get_edge_vnic_bindings_with_networks(session): query = session.query(nsxv_models.NsxvEdgeVnicBinding) return query.filter( nsxv_models.NsxvEdgeVnicBinding.network_id != expr.null()).all() def get_edge_vnic_binding(session, edge_id, network_id): return session.query(nsxv_models.NsxvEdgeVnicBinding).filter_by( edge_id=edge_id, network_id=network_id).first() def get_edge_vnic_bindings_by_edge(session, edge_id): query = session.query(nsxv_models.NsxvEdgeVnicBinding) return query.filter( nsxv_models.NsxvEdgeVnicBinding.edge_id == edge_id, nsxv_models.NsxvEdgeVnicBinding.network_id != expr.null()).all() def get_edge_vnic_bindings_by_int_lswitch(session, lswitch_id): return session.query(nsxv_models.NsxvEdgeVnicBinding).filter_by( network_id=lswitch_id).all() def create_edge_vnic_binding(session, edge_id, vnic_index, network_id, tunnel_index=-1): with session.begin(subtransactions=True): binding = nsxv_models.NsxvEdgeVnicBinding( edge_id=edge_id, vnic_index=vnic_index, tunnel_index=tunnel_index, network_id=network_id) session.add(binding) return binding def delete_edge_vnic_binding_by_network(session, edge_id, network_id): with session.begin(subtransactions=True): binding = (session.query(nsxv_models.NsxvEdgeVnicBinding). 
filter_by(edge_id=edge_id, network_id=network_id).one()) session.delete(binding) def init_edge_vnic_binding(session, edge_id): """Init edge vnic binding to preallocated 10 available edge vnics.""" with session.begin(subtransactions=True): for vnic_index in range(constants.MAX_VNIC_NUM)[1:]: start = (vnic_index - 1) * constants.MAX_TUNNEL_NUM stop = vnic_index * constants.MAX_TUNNEL_NUM for tunnel_index in range(start, stop): binding = nsxv_models.NsxvEdgeVnicBinding( edge_id=edge_id, vnic_index=vnic_index, tunnel_index=tunnel_index + 1) session.add(binding) def clean_edge_vnic_binding(session, edge_id): """Clean edge vnic binding.""" with session.begin(subtransactions=True): (session.query(nsxv_models.NsxvEdgeVnicBinding). filter_by(edge_id=edge_id).delete()) def allocate_edge_vnic(session, edge_id, network_id): """Allocate an available edge vnic to network.""" with session.begin(subtransactions=True): bindings = (session.query(nsxv_models.NsxvEdgeVnicBinding). filter_by(edge_id=edge_id, network_id=None).all()) for binding in bindings: if binding['tunnel_index'] % constants.MAX_TUNNEL_NUM == 1: binding['network_id'] = network_id session.add(binding) return binding msg = (_("Edge VNIC: Failed to allocate one available vnic on edge_id: " ":%(edge_id)s to network_id: %(network_id)s") % {'edge_id': edge_id, 'network_id': network_id}) LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) def allocate_edge_vnic_with_tunnel_index(session, edge_id, network_id, availability_zone): """Allocate an available edge vnic with tunnel index to network.""" # TODO(berlin): temporary solution to let metadata and dhcp use # different vnics int_net = get_nsxv_internal_network( session, constants.InternalEdgePurposes.INTER_EDGE_PURPOSE, availability_zone) metadata_net_id = int_net['network_id'] if int_net else None with session.begin(subtransactions=True): query = session.query(nsxv_models.NsxvEdgeVnicBinding) query = query.filter( nsxv_models.NsxvEdgeVnicBinding.edge_id == 
edge_id, nsxv_models.NsxvEdgeVnicBinding.network_id == expr.null()) if metadata_net_id: vnic_binding = get_edge_vnic_binding( session, edge_id, metadata_net_id) if vnic_binding: vnic_index = vnic_binding.vnic_index query = query.filter( nsxv_models.NsxvEdgeVnicBinding.vnic_index != vnic_index) binding = query.first() if not binding: msg = (_("Failed to allocate one available vnic on edge_id: " ":%(edge_id)s to network_id: %(network_id)s") % {'edge_id': edge_id, 'network_id': network_id}) LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) binding['network_id'] = network_id session.add(binding) return binding def allocate_specific_edge_vnic(session, edge_id, vnic_index, tunnel_index, network_id): """Allocate an specific edge vnic to network.""" with session.begin(subtransactions=True): binding = (session.query(nsxv_models.NsxvEdgeVnicBinding). filter_by(edge_id=edge_id, vnic_index=vnic_index, tunnel_index=tunnel_index).one()) binding['network_id'] = network_id session.add(binding) return binding def get_dhcp_edge_network_binding(session, network_id): with session.begin(subtransactions=True): dhcp_router_edges = [binding['edge_id'] for binding in get_nsxv_router_bindings(session) if binding['router_id'].startswith( constants.DHCP_EDGE_PREFIX)] bindings = (session.query(nsxv_models.NsxvEdgeVnicBinding). filter_by(network_id=network_id)) for binding in bindings: edge_id = binding['edge_id'] if edge_id in dhcp_router_edges: return binding def free_edge_vnic_by_network(session, edge_id, network_id): """Free an edge vnic.""" with session.begin(subtransactions=True): binding = (session.query(nsxv_models.NsxvEdgeVnicBinding). 
filter_by(edge_id=edge_id, network_id=network_id).one()) binding['network_id'] = None session.add(binding) return binding def _create_edge_dhcp_static_binding(session, edge_id, mac_address, binding_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvEdgeDhcpStaticBinding( edge_id=edge_id, mac_address=mac_address, binding_id=binding_id) session.add(binding) return binding def create_edge_dhcp_static_binding(session, edge_id, mac_address, binding_id): try: return _create_edge_dhcp_static_binding(session, edge_id, mac_address, binding_id) except db_exc.DBDuplicateEntry: LOG.warning('Conflicting DHCP binding entry for ' '%(edge_id)s:%(mac_address)s. Overwriting!', {'edge_id': edge_id, 'mac_address': mac_address}) delete_edge_dhcp_static_binding(session, edge_id, mac_address) return _create_edge_dhcp_static_binding(session, edge_id, mac_address, binding_id) def get_edge_dhcp_static_binding(session, edge_id, mac_address): return session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by( edge_id=edge_id, mac_address=mac_address).first() def get_dhcp_static_bindings_by_edge(session, edge_id): return session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by( edge_id=edge_id).all() def delete_edge_dhcp_static_binding(session, edge_id, mac_address): with session.begin(subtransactions=True): session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by( edge_id=edge_id, mac_address=mac_address).delete() def delete_edge_dhcp_static_binding_id(session, edge_id, binding_id): with session.begin(subtransactions=True): session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by( edge_id=edge_id, binding_id=binding_id).delete() def get_nsxv_dhcp_bindings_count_per_edge(session): return ( session.query( NsxvEdgeDhcpStaticBinding.edge_id, func.count(NsxvEdgeDhcpStaticBinding.mac_address)).group_by( NsxvEdgeDhcpStaticBinding.edge_id).all()) def clean_edge_dhcp_static_bindings_by_edge(session, edge_id): with session.begin(subtransactions=True): 
session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by( edge_id=edge_id).delete() def create_nsxv_internal_network(session, network_purpose, availability_zone, network_id): with session.begin(subtransactions=True): try: network = nsxv_models.NsxvInternalNetworks( network_purpose=network_purpose, network_id=network_id, availability_zone=availability_zone) session.add(network) except db_exc.DBDuplicateEntry: with excutils.save_and_reraise_exception(): LOG.exception("Duplicate internal network for purpose " "%(p)s and availabiltiy zone %(az)s", {'p': network_purpose, 'az': availability_zone}) def get_nsxv_internal_network(session, network_purpose, availability_zone, default_fallback=True): with session.begin(subtransactions=True): net_list = (session.query(nsxv_models.NsxvInternalNetworks). filter_by(network_purpose=network_purpose, availability_zone=availability_zone).all()) if net_list: # Should have only one results as purpose+az are the keys return net_list[0] elif default_fallback and availability_zone != nsx_az.DEFAULT_NAME: # try the default availability zone, since this zone does not # have his own internal edge net_list = (session.query(nsxv_models.NsxvInternalNetworks). filter_by(network_purpose=network_purpose, availability_zone=nsx_az.DEFAULT_NAME).all()) if net_list: return net_list[0] def get_nsxv_internal_network_for_az(session, network_purpose, availability_zone): return get_nsxv_internal_network(session, network_purpose, availability_zone, default_fallback=False) def get_nsxv_internal_networks(session, network_purpose): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalNetworks). filter_by(network_purpose=network_purpose).all()) def get_nsxv_internal_network_by_id(session, network_id): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalNetworks). 
filter_by(network_id=network_id).first()) def delete_nsxv_internal_network(session, network_purpose, network_id): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalNetworks). filter_by(network_purpose=network_purpose, network_id=network_id).delete()) def create_nsxv_internal_edge(session, ext_ip_address, purpose, router_id): with session.begin(subtransactions=True): try: internal_edge = nsxv_models.NsxvInternalEdges( ext_ip_address=ext_ip_address, purpose=purpose, router_id=router_id) session.add(internal_edge) except db_exc.DBDuplicateEntry: with excutils.save_and_reraise_exception(): LOG.exception("Duplicate internal Edge IP %s", ext_ip_address) def get_nsxv_internal_edge(session, ext_ip_address): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalEdges). filter_by(ext_ip_address=ext_ip_address).all()) def update_nsxv_internal_edge(session, ext_ip_address, router_id): with session.begin(subtransactions=True): edges = get_nsxv_internal_edge(session, ext_ip_address) for edge in edges: edge['router_id'] = router_id def get_nsxv_internal_edges_by_purpose(session, purpose): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalEdges). filter_by(purpose=purpose).all()) def get_nsxv_internal_edge_by_router(session, router_id): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalEdges). filter_by(router_id=router_id).first()) def delete_nsxv_internal_edge(session, ext_ip_address): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalEdges). 
filter_by(ext_ip_address=ext_ip_address).delete()) def add_neutron_nsx_section_mapping(session, neutron_id, section_id): with session.begin(subtransactions=True): mapping = nsxv_models.NsxvSecurityGroupSectionMapping( neutron_id=neutron_id, ip_section_id=section_id) session.add(mapping) return mapping def add_neutron_nsx_rule_mapping(session, neutron_id, nsx_rule_id): with session.begin(subtransactions=True): mapping = nsxv_models.NsxvRuleMapping(neutron_id=neutron_id, nsx_rule_id=nsx_rule_id) session.add(mapping) return mapping def add_neutron_nsx_port_vnic_mapping(session, neutron_id, nsx_id): with session.begin(subtransactions=True): mapping = nsxv_models.NsxvPortVnicMapping( neutron_id=neutron_id, nsx_id=nsx_id) session.add(mapping) return mapping def get_nsx_section(session, neutron_id): try: mapping = (session.query(nsxv_models.NsxvSecurityGroupSectionMapping). filter_by(neutron_id=neutron_id). one()) return mapping except exc.NoResultFound: LOG.debug("NSX identifiers for neutron security group %s not yet " "stored in Neutron DB", neutron_id) def delete_neutron_nsx_section_mapping(session, neutron_id): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvSecurityGroupSectionMapping). filter_by(neutron_id=neutron_id).delete()) def get_nsx_rule_id(session, neutron_id): try: mapping = (session.query(nsxv_models.NsxvRuleMapping). filter_by(neutron_id=neutron_id). one()) return mapping['nsx_rule_id'] except exc.NoResultFound: LOG.debug("NSX identifiers for neutron rule %s not yet " "stored in Neutron DB", neutron_id) def get_nsx_vnic_id(session, neutron_id): try: mapping = (session.query(nsxv_models.NsxvPortVnicMapping). filter_by(neutron_id=neutron_id). 
one()) return mapping['nsx_id'] except exc.NoResultFound: LOG.debug("NSX identifiers for neutron port %s not yet " "stored in Neutron DB", neutron_id) def get_network_bindings(session, network_id): session = session or db.get_reader_session() return (session.query(nsxv_models.NsxvTzNetworkBinding). filter_by(network_id=network_id). all()) def get_network_bindings_by_vlanid_and_physical_net(session, vlan_id, phy_uuid): session = session or db.get_reader_session() return (session.query(nsxv_models.NsxvTzNetworkBinding). filter_by(vlan_id=vlan_id, phy_uuid=phy_uuid). all()) def get_network_bindings_by_ids(session, vlan_id, phy_uuid): return get_network_bindings_by_vlanid_and_physical_net( session, vlan_id, phy_uuid) def get_network_bindings_by_physical_net(session, phy_uuid): session = session or db.get_reader_session() return (session.query(nsxv_models.NsxvTzNetworkBinding). filter_by(phy_uuid=phy_uuid). all()) def get_network_bindings_by_physical_net_and_type(session, phy_uuid, binding_type): session = session or db.get_reader_session() return (session.query(nsxv_models.NsxvTzNetworkBinding). filter_by(phy_uuid=phy_uuid, binding_type=binding_type). all()) def delete_network_bindings(session, network_id): return (session.query(nsxv_models.NsxvTzNetworkBinding). filter_by(network_id=network_id).delete()) def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvTzNetworkBinding(network_id, binding_type, phy_uuid, vlan_id) session.add(binding) return binding def get_network_bindings_by_vlanid(session, vlan_id): session = session or db.get_reader_session() return (session.query(nsxv_models.NsxvTzNetworkBinding). filter_by(vlan_id=vlan_id). 
all()) def update_network_binding_phy_uuid(session, network_id, binding_type, vlan_id, phy_uuid): with session.begin(subtransactions=True): bindings = (session.query(nsxv_models.NsxvTzNetworkBinding).filter_by( vlan_id=vlan_id, network_id=network_id, binding_type=binding_type).all()) for binding in bindings: binding['phy_uuid'] = phy_uuid # # Edge Firewall binding methods # def add_nsxv_edge_firewallrule_binding(session, map_info): with session.begin(subtransactions=True): binding = nsxv_models.NsxvEdgeFirewallRuleBinding( rule_id=map_info['rule_id'], rule_vse_id=map_info['rule_vseid'], edge_id=map_info['edge_id']) session.add(binding) return binding def delete_nsxv_edge_firewallrule_binding(session, id): with session.begin(subtransactions=True): if not (session.query(nsxv_models.NsxvEdgeFirewallRuleBinding). filter_by(rule_id=id).delete()): msg = _("Rule Resource binding with id:%s not found!") % id raise nsx_exc.NsxPluginException(err_msg=msg) def get_nsxv_edge_firewallrule_binding(session, id, edge_id): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvEdgeFirewallRuleBinding). filter_by(rule_id=id, edge_id=edge_id).first()) def get_nsxv_edge_firewallrule_binding_by_vseid( session, edge_id, rule_vseid): with session.begin(subtransactions=True): try: return (session.query(nsxv_models.NsxvEdgeFirewallRuleBinding). 
filter_by(edge_id=edge_id, rule_vse_id=rule_vseid).one()) except exc.NoResultFound: return def cleanup_nsxv_edge_firewallrule_binding(session, edge_id): with session.begin(subtransactions=True): session.query( nsxv_models.NsxvEdgeFirewallRuleBinding).filter_by( edge_id=edge_id).delete() def map_spoofguard_policy_for_network(session, network_id, policy_id): with session.begin(subtransactions=True): mapping = nsxv_models.NsxvSpoofGuardPolicyNetworkMapping( network_id=network_id, policy_id=policy_id) session.add(mapping) return mapping def get_spoofguard_policy_id(session, network_id): try: mapping = (session.query( nsxv_models.NsxvSpoofGuardPolicyNetworkMapping). filter_by(network_id=network_id).one()) return mapping['policy_id'] except exc.NoResultFound: LOG.debug("SpoofGuard Policy for network %s was not found", network_id) def get_nsxv_spoofguard_policy_network_mappings(session, filters=None, like_filters=None): session = db.get_reader_session() query = session.query(nsxv_models.NsxvSpoofGuardPolicyNetworkMapping) return nsx_db._apply_filters_to_query( query, nsxv_models.NsxvSpoofGuardPolicyNetworkMapping, filters, like_filters).all() def add_nsxv_lbaas_loadbalancer_binding( session, loadbalancer_id, edge_id, edge_fw_rule_id, vip_address): with session.begin(subtransactions=True): binding = nsxv_models.NsxvLbaasLoadbalancerBinding( loadbalancer_id=loadbalancer_id, edge_id=edge_id, edge_fw_rule_id=edge_fw_rule_id, vip_address=vip_address) session.add(binding) return binding def get_nsxv_lbaas_loadbalancer_binding(session, loadbalancer_id): try: return session.query( nsxv_models.NsxvLbaasLoadbalancerBinding).filter_by( loadbalancer_id=loadbalancer_id).one() except exc.NoResultFound: return def get_nsxv_lbaas_loadbalancer_binding_by_edge(session, edge_id): return session.query( nsxv_models.NsxvLbaasLoadbalancerBinding).filter_by( edge_id=edge_id).all() def del_nsxv_lbaas_loadbalancer_binding(session, loadbalancer_id): return 
(session.query(nsxv_models.NsxvLbaasLoadbalancerBinding). filter_by(loadbalancer_id=loadbalancer_id).delete()) def add_nsxv_lbaas_listener_binding(session, loadbalancer_id, listener_id, app_profile_id, vse_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvLbaasListenerBinding( loadbalancer_id=loadbalancer_id, listener_id=listener_id, app_profile_id=app_profile_id, vse_id=vse_id) session.add(binding) return binding def get_nsxv_lbaas_listener_binding(session, loadbalancer_id, listener_id): try: return session.query( nsxv_models.NsxvLbaasListenerBinding).filter_by( loadbalancer_id=loadbalancer_id, listener_id=listener_id).one() except exc.NoResultFound: return def del_nsxv_lbaas_listener_binding(session, loadbalancer_id, listener_id): return (session.query(nsxv_models.NsxvLbaasListenerBinding). filter_by(loadbalancer_id=loadbalancer_id, listener_id=listener_id).delete()) def add_nsxv_lbaas_pool_binding(session, loadbalancer_id, pool_id, edge_pool_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvLbaasPoolBinding( loadbalancer_id=loadbalancer_id, pool_id=pool_id, edge_pool_id=edge_pool_id) session.add(binding) return binding def get_nsxv_lbaas_pool_binding(session, loadbalancer_id, pool_id): try: return session.query( nsxv_models.NsxvLbaasPoolBinding).filter_by( loadbalancer_id=loadbalancer_id, pool_id=pool_id).one() except exc.NoResultFound: return def del_nsxv_lbaas_pool_binding(session, loadbalancer_id, pool_id): return (session.query(nsxv_models.NsxvLbaasPoolBinding). 
filter_by(loadbalancer_id=loadbalancer_id, pool_id=pool_id).delete()) def add_nsxv_lbaas_monitor_binding(session, loadbalancer_id, pool_id, hm_id, edge_id, edge_mon_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvLbaasMonitorBinding( loadbalancer_id=loadbalancer_id, pool_id=pool_id, hm_id=hm_id, edge_id=edge_id, edge_mon_id=edge_mon_id) session.add(binding) return binding def get_nsxv_lbaas_monitor_binding(session, loadbalancer_id, pool_id, hm_id, edge_id): try: return session.query( nsxv_models.NsxvLbaasMonitorBinding).filter_by( loadbalancer_id=loadbalancer_id, pool_id=pool_id, hm_id=hm_id, edge_id=edge_id).one() except exc.NoResultFound: return def del_nsxv_lbaas_monitor_binding(session, loadbalancer_id, pool_id, hm_id, edge_id): return (session.query(nsxv_models.NsxvLbaasMonitorBinding). filter_by(loadbalancer_id=loadbalancer_id, pool_id=pool_id, hm_id=hm_id, edge_id=edge_id).delete()) def add_nsxv_lbaas_certificate_binding(session, cert_id, edge_id, edge_cert_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvLbaasCertificateBinding( cert_id=cert_id, edge_id=edge_id, edge_cert_id=edge_cert_id) session.add(binding) return binding def get_nsxv_lbaas_certificate_binding(session, cert_id, edge_id): try: return session.query( nsxv_models.NsxvLbaasCertificateBinding).filter_by( cert_id=cert_id, edge_id=edge_id).one() except exc.NoResultFound: return def del_nsxv_lbaas_certificate_binding(session, cert_id, edge_id): return (session.query(nsxv_models.NsxvLbaasCertificateBinding). 
filter_by(cert_id=cert_id, edge_id=edge_id).delete()) def add_nsxv_lbaas_l7policy_binding(session, policy_id, edge_id, edge_app_rule_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvLbaasL7PolicyBinding( policy_id=policy_id, edge_id=edge_id, edge_app_rule_id=edge_app_rule_id) session.add(binding) return binding def get_nsxv_lbaas_l7policy_binding(session, policy_id): try: return session.query( nsxv_models.NsxvLbaasL7PolicyBinding).filter_by( policy_id=policy_id).one() except exc.NoResultFound: return def del_nsxv_lbaas_l7policy_binding(session, policy_id): try: return (session.query(nsxv_models.NsxvLbaasL7PolicyBinding). filter_by(policy_id=policy_id).delete()) except exc.NoResultFound: return def add_nsxv_subnet_ext_attributes(session, subnet_id, dns_search_domain=None, dhcp_mtu=None): with session.begin(subtransactions=True): binding = nsxv_models.NsxvSubnetExtAttributes( subnet_id=subnet_id, dns_search_domain=dns_search_domain, dhcp_mtu=dhcp_mtu) session.add(binding) return binding def get_nsxv_subnet_ext_attributes(session, subnet_id): try: return session.query( nsxv_models.NsxvSubnetExtAttributes).filter_by( subnet_id=subnet_id).one() except exc.NoResultFound: return def update_nsxv_subnet_ext_attributes(session, subnet_id, dns_search_domain=None, dhcp_mtu=None): with session.begin(subtransactions=True): binding = (session.query(nsxv_models.NsxvSubnetExtAttributes). 
filter_by(subnet_id=subnet_id).one()) binding[ext_dns_search_domain.DNS_SEARCH_DOMAIN] = dns_search_domain binding[ext_dhcp_mtu.DHCP_MTU] = dhcp_mtu return binding def add_nsxv_port_ext_attributes(session, port_id, vnic_type=pbin.VNIC_NORMAL): with session.begin(subtransactions=True): binding = nsxv_models.NsxvPortExtAttributes( port_id=port_id, vnic_type=vnic_type) session.add(binding) return binding def get_nsxv_ext_attr_port_vnic_type(session, port_id): try: binding = session.query(nsxv_models.NsxvPortExtAttributes).filter_by( port_id=port_id).one() return binding['vnic_type'] except exc.NoResultFound: return pbin.VNIC_NORMAL def update_nsxv_port_ext_attributes(session, port_id, vnic_type=pbin.VNIC_NORMAL): try: binding = session.query( nsxv_models.NsxvPortExtAttributes).filter_by( port_id=port_id).one() binding['vnic_type'] = vnic_type return binding except exc.NoResultFound: return add_nsxv_port_ext_attributes( session, port_id, vnic_type=vnic_type) def add_nsxv_bgp_speaker_binding(session, edge_id, speaker_id, bgp_identifier): with session.begin(subtransactions=True): binding = nsxv_models.NsxvBgpSpeakerBinding( edge_id=edge_id, bgp_speaker_id=speaker_id, bgp_identifier=bgp_identifier) session.add(binding) return binding def get_nsxv_bgp_speaker_binding(session, edge_id): try: binding = (session.query(nsxv_models.NsxvBgpSpeakerBinding). filter_by(edge_id=edge_id). one()) return binding except exc.NoResultFound: LOG.debug("No dynamic routing enabled on edge %s.", edge_id) def get_nsxv_bgp_speaker_bindings(session, speaker_id): try: return (session.query(nsxv_models.NsxvBgpSpeakerBinding). 
filter_by(bgp_speaker_id=speaker_id).all()) except exc.NoResultFound: return [] def delete_nsxv_bgp_speaker_binding(session, edge_id): binding = session.query( nsxv_models.NsxvBgpSpeakerBinding).filter_by(edge_id=edge_id) if binding: binding.delete() def add_nsxv_bgp_peer_edge_binding(session, peer_id, edge_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvBgpPeerEdgeBinding(edge_id=edge_id, peer_id=peer_id) session.add(binding) return binding def get_nsxv_bgp_peer_edge_binding(session, peer_id): try: binding = (session.query(nsxv_models.NsxvBgpPeerEdgeBinding). filter_by(peer_id=peer_id).one()) return binding except exc.NoResultFound: pass vmware-nsx-12.0.1/vmware_nsx/db/lsn_db.py0000666000175100017510000001001313244523345020321 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from oslo_db import exception as d_exc from oslo_log import log as logging from sqlalchemy import orm from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as p_exc from vmware_nsx.db import nsx_models from neutron.db import api as db_api LOG = logging.getLogger(__name__) def lsn_add(context, network_id, lsn_id): """Add Logical Service Node information to persistent datastore.""" with db_api.context_manager.writer.using(context): lsn = nsx_models.Lsn(network_id, lsn_id) context.session.add(lsn) def lsn_remove(context, lsn_id): """Remove Logical Service Node information from datastore given its id.""" with db_api.context_manager.writer.using(context): context.session.query(nsx_models.Lsn).filter_by(lsn_id=lsn_id).delete() def lsn_remove_for_network(context, network_id): """Remove information about the Logical Service Node given its network.""" with db_api.context_manager.writer.using(context): context.session.query(nsx_models.Lsn).filter_by( net_id=network_id).delete() def lsn_get_for_network(context, network_id, raise_on_err=True): """Retrieve LSN information given its network id.""" query = context.session.query(nsx_models.Lsn) try: return query.filter_by(net_id=network_id).one() except (orm.exc.NoResultFound, d_exc.DBError): msg = _('Unable to find Logical Service Node for network %s') if raise_on_err: LOG.error(msg, network_id) raise p_exc.LsnNotFound(entity='network', entity_id=network_id) else: LOG.warning(msg, network_id) def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id): """Add Logical Service Node Port information to persistent datastore.""" with db_api.context_manager.writer.using(context): lsn_port = nsx_models.LsnPort(lsn_port_id, subnet_id, mac, lsn_id) context.session.add(lsn_port) def lsn_port_get_for_subnet(context, subnet_id, raise_on_err=True): """Return Logical Service Node Port information given its subnet id.""" with db_api.context_manager.reader.using(context): try: return 
(context.session.query(nsx_models.LsnPort). filter_by(sub_id=subnet_id).one()) except (orm.exc.NoResultFound, d_exc.DBError): if raise_on_err: raise p_exc.LsnPortNotFound(lsn_id=None, entity='subnet', entity_id=subnet_id) def lsn_port_get_for_mac(context, mac_address, raise_on_err=True): """Return Logical Service Node Port information given its mac address.""" with db_api.context_manager.reader.using(context): try: return (context.session.query(nsx_models.LsnPort). filter_by(mac_addr=mac_address).one()) except (orm.exc.NoResultFound, d_exc.DBError): if raise_on_err: raise p_exc.LsnPortNotFound(lsn_id=None, entity='mac', entity_id=mac_address) def lsn_port_remove(context, lsn_port_id): """Remove Logical Service Node port from the given Logical Service Node.""" with db_api.context_manager.writer.using(context): (context.session.query(nsx_models.LsnPort). filter_by(lsn_port_id=lsn_port_id).delete()) vmware-nsx-12.0.1/vmware_nsx/db/routertype.py0000666000175100017510000000210113244523345021301 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from vmware_nsx.common import nsxv_constants from vmware_nsx.db import ( distributedrouter as dist_rtr) from vmware_nsx.extensions import routertype as rt_rtr class RouterType_mixin(dist_rtr.DistributedRouter_mixin): """Mixin class to enable Router type support.""" nsx_attributes = ( dist_rtr.DistributedRouter_mixin.nsx_attributes + [{ 'name': rt_rtr.ROUTER_TYPE, 'default': nsxv_constants.SHARED }]) vmware-nsx-12.0.1/vmware_nsx/db/nsx_models.py0000666000175100017510000005254613244523345021254 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NSX data models. This module defines data models used by the VMware NSX plugin family. """ from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy import sql from neutron.db import models_v2 from oslo_db.sqlalchemy import models from vmware_nsxlib.v3 import nsx_constants class TzNetworkBinding(model_base.BASEV2, models.TimestampMixin): """Represents a binding of a virtual network with a transport zone. This model class associates a Neutron network with a transport zone; optionally a vlan ID might be used if the binding type is 'bridge' """ __tablename__ = 'tz_network_bindings' # TODO(arosen) - it might be worth while refactoring the how this data # is stored later so every column does not need to be a primary key. 
network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True) # 'flat', 'vlan', 'stt', 'gre', 'l3_ext', 'geneve', 'portgroup', 'nsx-net' binding_type = sa.Column(sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', 'geneve', 'portgroup', 'nsx-net', name='tz_network_bindings_binding_type'), nullable=False, primary_key=True) phy_uuid = sa.Column(sa.String(36), primary_key=True, default='') vlan_id = sa.Column(sa.Integer, primary_key=True, autoincrement=False, default=0) def __init__(self, network_id, binding_type, phy_uuid, vlan_id): self.network_id = network_id self.binding_type = binding_type self.phy_uuid = phy_uuid self.vlan_id = vlan_id def __repr__(self): return "" % (self.network_id, self.binding_type, self.phy_uuid, self.vlan_id) class NeutronNsxNetworkMapping(model_base.BASEV2, models.TimestampMixin): """Maps neutron network identifiers to NSX identifiers. Because of chained logical switches more than one mapping might exist for a single Neutron network. For a VLAN network, one neutron network may map to multiple logical switches(port groups) created on multiple DVSes in the backend for NSX-V plugin. DVS-ID will store the moref of the DVS where the nsx id is being created. For other types and plugins, this value will remain null. """ __tablename__ = 'neutron_nsx_network_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE'), primary_key=True) nsx_id = sa.Column(sa.String(36), primary_key=True) dvs_id = sa.Column(sa.String(36), nullable=True) class NeutronNsxSecurityGroupMapping(model_base.BASEV2, models.TimestampMixin): """Backend mappings for Neutron Security Group identifiers. This class maps a neutron security group identifier to the corresponding NSX security profile identifier. 
""" __tablename__ = 'neutron_nsx_security_group_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('securitygroups.id', ondelete="CASCADE"), primary_key=True) nsx_id = sa.Column(sa.String(36), primary_key=True) class NeutronNsxFirewallSectionMapping(model_base.BASEV2, models.TimestampMixin): """Backend mappings for Neutron Security-group associated fw sections.""" __tablename__ = 'neutron_nsx_firewall_section_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('securitygroups.id', ondelete='CASCADE'), primary_key=True, nullable=False) nsx_id = sa.Column(sa.String(36), nullable=False) class NeutronNsxRuleMapping(model_base.BASEV2, models.TimestampMixin): """Backend mappings for firewall rules. This class maps a neutron security group rule with NSX firewall rule. """ __tablename__ = 'neutron_nsx_rule_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('securitygrouprules.id', ondelete="CASCADE"), primary_key=True, nullable=False) nsx_id = sa.Column(sa.String(36), nullable=False) class NeutronNsxPortMapping(model_base.BASEV2, models.TimestampMixin): """Represents the mapping between neutron and nsx port uuids.""" __tablename__ = 'neutron_nsx_port_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) nsx_switch_id = sa.Column(sa.String(36)) nsx_port_id = sa.Column(sa.String(36), nullable=False) def __init__(self, neutron_id, nsx_switch_id, nsx_port_id): self.neutron_id = neutron_id self.nsx_switch_id = nsx_switch_id self.nsx_port_id = nsx_port_id class NeutronNsxRouterMapping(model_base.BASEV2, models.TimestampMixin): """Maps neutron router identifiers to NSX identifiers.""" __tablename__ = 'neutron_nsx_router_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', ondelete='CASCADE'), primary_key=True) nsx_id = sa.Column(sa.String(36)) class NeutronNsxServiceBinding(model_base.BASEV2, models.TimestampMixin): """Represents a binding of a Neutron network with 
enabled NSX services.""" __tablename__ = 'neutron_nsx_service_bindings' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE'), nullable=False, primary_key=True) port_id = sa.Column(sa.String(36), nullable=True) nsx_service_type = sa.Column( sa.Enum(nsx_constants.SERVICE_DHCP, name='neutron_nsx_service_bindings_service_type'), nullable=False, primary_key=True) nsx_service_id = sa.Column(sa.String(36), nullable=False) class NeutronNsxDhcpBinding(model_base.BASEV2, models.TimestampMixin): """Represents a binding of a Neutron port with DHCP address binding.""" __tablename__ = 'neutron_nsx_dhcp_bindings' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), nullable=False, primary_key=True) subnet_id = sa.Column(sa.String(36), nullable=False) ip_address = sa.Column(sa.String(64), nullable=False) nsx_service_id = sa.Column(sa.String(36), nullable=False) nsx_binding_id = sa.Column(sa.String(36), nullable=False, primary_key=True) class MultiProviderNetworks(model_base.BASEV2, models.TimestampMixin): """Networks provisioned through multiprovider extension.""" __tablename__ = 'multi_provider_networks' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True) def __init__(self, network_id): self.network_id = network_id class NetworkConnection(model_base.BASEV2, model_base.HasProject, models.TimestampMixin): """Defines a connection between a network gateway and a network.""" # We use port_id as the primary key as one can connect a gateway # to a network in multiple ways (and we cannot use the same port form # more than a single gateway) network_gateway_id = sa.Column(sa.String(36), sa.ForeignKey('networkgateways.id', ondelete='CASCADE')) network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE')) segmentation_type = sa.Column( sa.Enum('flat', 'vlan', name='networkconnections_segmentation_type')) segmentation_id = sa.Column(sa.Integer) 
__table_args__ = (sa.UniqueConstraint(network_gateway_id, segmentation_type, segmentation_id), model_base.BASEV2.__table_args__) # Also, storing port id comes back useful when disconnecting a network # from a gateway port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete='CASCADE'), primary_key=True) class NetworkGatewayDeviceReference(model_base.BASEV2, models.TimestampMixin): id = sa.Column(sa.String(36), primary_key=True) network_gateway_id = sa.Column(sa.String(36), sa.ForeignKey('networkgateways.id', ondelete='CASCADE'), primary_key=True) interface_name = sa.Column(sa.String(64), primary_key=True) class NetworkGatewayDevice(model_base.BASEV2, model_base.HasId, model_base.HasProject, models.TimestampMixin): nsx_id = sa.Column(sa.String(36)) # Optional name for the gateway device name = sa.Column(sa.String(255)) # Transport connector type. Not using enum as range of # connector types might vary with backend version connector_type = sa.Column(sa.String(10)) # Transport connector IP Address connector_ip = sa.Column(sa.String(64)) # operational status status = sa.Column(sa.String(16)) class NetworkGateway(model_base.BASEV2, model_base.HasId, model_base.HasProject, models.TimestampMixin): """Defines the data model for a network gateway.""" name = sa.Column(sa.String(255)) default = sa.Column(sa.Boolean()) devices = orm.relationship(NetworkGatewayDeviceReference, backref='networkgateways', cascade='all,delete') network_connections = orm.relationship(NetworkConnection, lazy='joined') class MacLearningState(model_base.BASEV2, models.TimestampMixin): port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) mac_learning_enabled = sa.Column(sa.Boolean(), nullable=False) # Add a relationship to the Port model using the backref attribute. # This will instruct SQLAlchemy to eagerly load this association. 
port = orm.relationship( models_v2.Port, backref=orm.backref("mac_learning_state", lazy='joined', uselist=False, cascade='delete')) class LsnPort(models_v2.model_base.BASEV2, models.TimestampMixin): __tablename__ = 'lsn_port' lsn_port_id = sa.Column(sa.String(36), primary_key=True) lsn_id = sa.Column(sa.String(36), sa.ForeignKey('lsn.lsn_id', ondelete="CASCADE"), nullable=False) sub_id = sa.Column(sa.String(36), nullable=False, unique=True) mac_addr = sa.Column(sa.String(32), nullable=False, unique=True) def __init__(self, lsn_port_id, subnet_id, mac_address, lsn_id): self.lsn_port_id = lsn_port_id self.lsn_id = lsn_id self.sub_id = subnet_id self.mac_addr = mac_address class Lsn(models_v2.model_base.BASEV2, models.TimestampMixin): __tablename__ = 'lsn' lsn_id = sa.Column(sa.String(36), primary_key=True) net_id = sa.Column(sa.String(36), nullable=False) def __init__(self, net_id, lsn_id): self.net_id = net_id self.lsn_id = lsn_id class QoSQueue(model_base.BASEV2, model_base.HasId, model_base.HasProject, models.TimestampMixin): name = sa.Column(sa.String(255)) default = sa.Column(sa.Boolean, default=False, server_default=sql.false()) min = sa.Column(sa.Integer, nullable=False) max = sa.Column(sa.Integer, nullable=True) qos_marking = sa.Column(sa.Enum('untrusted', 'trusted', name='qosqueues_qos_marking')) dscp = sa.Column(sa.Integer) class PortQueueMapping(model_base.BASEV2, models.TimestampMixin): port_id = sa.Column(sa.String(36), sa.ForeignKey("ports.id", ondelete="CASCADE"), primary_key=True) queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id"), primary_key=True) # Add a relationship to the Port model adding a backref which will # allow SQLAlchemy for eagerly load the queue binding port = orm.relationship( models_v2.Port, backref=orm.backref("qos_queue", uselist=False, cascade='delete', lazy='joined')) class NetworkQueueMapping(model_base.BASEV2, models.TimestampMixin): network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id", 
ondelete="CASCADE"), primary_key=True) queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id", ondelete="CASCADE")) # Add a relationship to the Network model adding a backref which will # allow SQLAlcremy for eagerly load the queue binding network = orm.relationship( models_v2.Network, backref=orm.backref("qos_queue", uselist=False, cascade='delete', lazy='joined')) class NsxL2GWConnectionMapping(model_base.BASEV2, models.TimestampMixin): """Define a mapping between L2 gateway connection and bridge endpoint.""" __tablename__ = 'nsx_l2gw_connection_mappings' connection_id = sa.Column(sa.String(36), nullable=False, primary_key=True) port_id = sa.Column(sa.String(36), sa.ForeignKey("ports.id", ondelete="CASCADE"), nullable=False) bridge_endpoint_id = sa.Column(sa.String(36), nullable=False) class QosPolicySwitchProfile(model_base.BASEV2, models.TimestampMixin): # Maps neutron qos policy identifiers to NSX-V3 switch profile identifiers __tablename__ = 'neutron_nsx_qos_policy_mappings' qos_policy_id = sa.Column(sa.String(36), primary_key=True) switch_profile_id = sa.Column(sa.String(36), nullable=False) class NsxPortMirrorSessionMapping(model_base.BASEV2): """Define a mapping between Tap Flow and PortMirrorSession object.""" __tablename__ = 'nsx_port_mirror_session_mappings' tap_flow_id = sa.Column(sa.String(36), nullable=False, primary_key=True) port_mirror_session_id = sa.Column(sa.String(36), nullable=False) class NsxSubnetIpam(model_base.BASEV2, models.TimestampMixin): """Map Subnets with their backend pool id.""" __tablename__ = 'nsx_subnet_ipam' # the Subnet id is not a foreign key because the subnet is deleted # before the pool does subnet_id = sa.Column(sa.String(36), primary_key=True) nsx_pool_id = sa.Column(sa.String(36), primary_key=True) class NsxCertificateRepository(model_base.BASEV2, models.TimestampMixin): """Stores certificate and private key per logical purpose. 
For now, will have zero or one rows with nsxv3 client certificate """ __tablename__ = 'nsx_certificates' purpose = sa.Column(sa.String(32), nullable=False, primary_key=True) certificate = sa.Column(sa.String(9216), nullable=False) private_key = sa.Column(sa.String(5120), nullable=False) class NsxLbaasLoadbalancer(model_base.BASEV2, models.TimestampMixin): """Stores mapping of LBaaS loadbalancer and NSX LB service and router Since in NSXv3, multiple loadbalancers may share the same LB service on NSX backend. And the in turn LB service attaches to a logical router. This stores the mapping between LBaaS loadbalancer and NSX LB service id and NSX logical router id. """ __tablename__ = 'nsxv3_lbaas_loadbalancers' fk_name = 'fk_nsxv3_lbaas_loadbalancers_id' loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey('lbaas_loadbalancers.id', name=fk_name, ondelete="CASCADE"), primary_key=True) lb_router_id = sa.Column(sa.String(36), nullable=False) lb_service_id = sa.Column(sa.String(36), nullable=False) vip_address = sa.Column(sa.String(36), nullable=False) class NsxLbaasListener(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between LBaaS listener and NSX LB virtual server""" __tablename__ = 'nsxv3_lbaas_listeners' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) listener_id = sa.Column(sa.String(36), sa.ForeignKey('lbaas_listeners.id', name='fk_nsxv3_lbaas_listeners_id', ondelete="CASCADE"), primary_key=True) app_profile_id = sa.Column(sa.String(36), nullable=False) lb_vs_id = sa.Column(sa.String(36), nullable=False) class NsxLbaasPool(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between LBaaS pool and NSX LB Pool""" __tablename__ = 'nsxv3_lbaas_pools' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) pool_id = sa.Column(sa.String(36), sa.ForeignKey('lbaas_pools.id', name='fk_nsxv3_lbaas_pools_id', ondelete="CASCADE"), primary_key=True) lb_pool_id = sa.Column(sa.String(36), nullable=False) lb_vs_id = 
sa.Column(sa.String(36)) class NsxLbaasMonitor(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between LBaaS monitor and NSX LB monitor""" __tablename__ = 'nsxv3_lbaas_monitors' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) pool_id = sa.Column(sa.String(36), primary_key=True) hm_id = sa.Column(sa.String(36), sa.ForeignKey('lbaas_healthmonitors.id', name='fk_nsxv3_lbaas_healthmonitors_id', ondelete="CASCADE"), primary_key=True) lb_monitor_id = sa.Column(sa.String(36), nullable=False) lb_pool_id = sa.Column(sa.String(36), nullable=False) class NsxLbaasL7Rule(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between LBaaS monitor and NSX LB monitor This table is only used in Pike and obsoleted since Queen as the mapping has been stored in nsxv3_lbaas_l7policies table instead. This original table was added in pike so that we cannot change DB migration script there, but instead we update the table with a new db migration script in Queen. """ __tablename__ = 'nsxv3_lbaas_l7rules' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) l7policy_id = sa.Column(sa.String(36), primary_key=True) l7rule_id = sa.Column(sa.String(36), sa.ForeignKey('lbaas_l7rules.id', name='fk_nsxv3_lbaas_l7rules_id', ondelete="CASCADE"), primary_key=True) lb_rule_id = sa.Column(sa.String(36), nullable=False) lb_vs_id = sa.Column(sa.String(36), nullable=False) class NsxLbaasL7Policy(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between LBaaS l7policy and NSX LB rule""" __tablename__ = 'nsxv3_lbaas_l7policies' l7policy_id = sa.Column(sa.String(36), sa.ForeignKey('lbaas_l7policies.id', name='fk_nsxv3_lbaas_l7policies_id', ondelete="CASCADE"), primary_key=True) lb_rule_id = sa.Column(sa.String(36), nullable=False) lb_vs_id = sa.Column(sa.String(36), nullable=False) class NsxProjectPluginMapping(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between the neutron plugin and the project id""" __tablename__ = 
'nsx_project_plugin_mappings' project = sa.Column(sa.String(36), primary_key=True) plugin = sa.Column(sa.Enum('dvs', 'nsx-v', 'nsx-t'), nullable=False) class NsxVpnConnectionMapping(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between VPNaaS connections and NSX objects""" __tablename__ = 'neutron_nsx_vpn_connection_mappings' neutron_id = sa.Column(sa.String(36), primary_key=True) session_id = sa.Column(sa.String(36), nullable=False) dpd_profile_id = sa.Column(sa.String(36), nullable=False) ike_profile_id = sa.Column(sa.String(36), nullable=False) ipsec_profile_id = sa.Column(sa.String(36), nullable=False) peer_ep_id = sa.Column(sa.String(36), nullable=False) vmware-nsx-12.0.1/vmware_nsx/db/networkgw_db.py0000666000175100017510000005502213244523345021565 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy.orm import exc as sa_orm_exc from neutron.db import _model_query as model_query from neutron.db import _utils as db_utils from neutron.db import api as db_api from neutron.plugins.common import utils from neutron_lib import constants from neutron_lib import exceptions from oslo_log import log as logging from oslo_utils import uuidutils import six from vmware_nsx._i18n import _ from vmware_nsx.db import nsx_models from vmware_nsx.extensions import networkgw LOG = logging.getLogger(__name__) DEVICE_OWNER_NET_GW_INTF = 'network:gateway-interface' NETWORK_ID = 'network_id' SEGMENTATION_TYPE = 'segmentation_type' SEGMENTATION_ID = 'segmentation_id' ALLOWED_CONNECTION_ATTRIBUTES = set((NETWORK_ID, SEGMENTATION_TYPE, SEGMENTATION_ID)) # Constants for gateway device operational status STATUS_UNKNOWN = "UNKNOWN" STATUS_ERROR = "ERROR" STATUS_ACTIVE = "ACTIVE" STATUS_DOWN = "DOWN" class GatewayInUse(exceptions.InUse): message = _("Network Gateway '%(gateway_id)s' still has active mappings " "with one or more neutron networks.") class GatewayNotFound(exceptions.NotFound): message = _("Network Gateway %(gateway_id)s could not be found") class GatewayDeviceInUse(exceptions.InUse): message = _("Network Gateway Device '%(device_id)s' is still used by " "one or more network gateways.") class GatewayDeviceNotFound(exceptions.NotFound): message = _("Network Gateway Device %(device_id)s could not be found.") class GatewayDevicesNotFound(exceptions.NotFound): message = _("One or more Network Gateway Devices could not be found: " "%(device_ids)s.") class NetworkGatewayPortInUse(exceptions.InUse): message = _("Port '%(port_id)s' is owned by '%(device_owner)s' and " "therefore cannot be deleted directly via the port API.") class GatewayConnectionInUse(exceptions.InUse): message = _("The specified mapping '%(mapping)s' is already in use on " "network gateway '%(gateway_id)s'.") class MultipleGatewayConnections(exceptions.Conflict): message = _("Multiple network connections 
found on '%(gateway_id)s' " "with provided criteria.") class GatewayConnectionNotFound(exceptions.NotFound): message = _("The connection %(network_mapping_info)s was not found on the " "network gateway '%(network_gateway_id)s'") class NetworkGatewayUnchangeable(exceptions.InUse): message = _("The network gateway %(gateway_id)s " "cannot be updated or deleted") class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase): gateway_resource = networkgw.GATEWAY_RESOURCE_NAME device_resource = networkgw.DEVICE_RESOURCE_NAME def _get_network_gateway(self, context, gw_id): try: gw = model_query.get_by_id(context, nsx_models.NetworkGateway, gw_id) except sa_orm_exc.NoResultFound: raise GatewayNotFound(gateway_id=gw_id) return gw def _make_gw_connection_dict(self, gw_conn): return {'port_id': gw_conn['port_id'], 'segmentation_type': gw_conn['segmentation_type'], 'segmentation_id': gw_conn['segmentation_id']} def _make_network_gateway_dict(self, network_gateway, fields=None): device_list = [] for d in network_gateway['devices']: device_list.append({'id': d['id'], 'interface_name': d['interface_name']}) res = {'id': network_gateway['id'], 'name': network_gateway['name'], 'default': network_gateway['default'], 'devices': device_list, 'tenant_id': network_gateway['tenant_id']} # Query gateway connections only if needed if not fields or 'ports' in fields: res['ports'] = [self._make_gw_connection_dict(conn) for conn in network_gateway.network_connections] return db_utils.resource_fields(res, fields) def _set_mapping_info_defaults(self, mapping_info): if not mapping_info.get('segmentation_type'): mapping_info['segmentation_type'] = 'flat' if not mapping_info.get('segmentation_id'): mapping_info['segmentation_id'] = 0 def _validate_network_mapping_info(self, network_mapping_info): self._set_mapping_info_defaults(network_mapping_info) network_id = network_mapping_info.get(NETWORK_ID) if not network_id: raise exceptions.InvalidInput( error_message=_("A network identifier must be 
specified " "when connecting a network to a network " "gateway. Unable to complete operation")) connection_attrs = set(network_mapping_info.keys()) if not connection_attrs.issubset(ALLOWED_CONNECTION_ATTRIBUTES): raise exceptions.InvalidInput( error_message=(_("Invalid keys found among the ones provided " "in request body: %(connection_attrs)s."), connection_attrs)) seg_type = network_mapping_info.get(SEGMENTATION_TYPE) seg_id = network_mapping_info.get(SEGMENTATION_ID) # It is important to validate that the segmentation ID is actually an # integer value try: seg_id = int(seg_id) except ValueError: msg = _("An invalid segmentation ID was specified. The " "segmentation ID must be a positive integer number") raise exceptions.InvalidInput(error_message=msg) # The NSX plugin accepts 0 as a valid vlan tag seg_id_valid = seg_id == 0 or utils.is_valid_vlan_tag(seg_id) if seg_type.lower() == 'flat' and seg_id: msg = _("Cannot specify a segmentation id when " "the segmentation type is flat") raise exceptions.InvalidInput(error_message=msg) elif (seg_type.lower() == 'vlan' and not seg_id_valid): msg = _("Invalid segmentation id (%s) for " "vlan segmentation type") % seg_id raise exceptions.InvalidInput(error_message=msg) return network_id def _retrieve_gateway_connections(self, context, gateway_id, mapping_info=None, only_one=False): mapping_info = mapping_info or {} filters = {'network_gateway_id': [gateway_id]} for k, v in six.iteritems(mapping_info): if v and k != NETWORK_ID: filters[k] = [v] query = model_query.get_collection_query(context, nsx_models.NetworkConnection, filters) return query.one() if only_one else query.all() def _unset_default_network_gateways(self, context): with db_api.context_manager.writer.using(context): context.session.query(nsx_models.NetworkGateway).update( {nsx_models.NetworkGateway.default: False}) def _set_default_network_gateway(self, context, gw_id): with db_api.context_manager.writer.using(context): gw = 
(context.session.query(nsx_models.NetworkGateway). filter_by(id=gw_id).one()) gw['default'] = True def prevent_network_gateway_port_deletion(self, context, port): """Pre-deletion check. Ensures a port will not be deleted if is being used by a network gateway. In that case an exception will be raised. """ if port['device_owner'] == DEVICE_OWNER_NET_GW_INTF: raise NetworkGatewayPortInUse(port_id=port['id'], device_owner=port['device_owner']) def _validate_device_list(self, context, tenant_id, gateway_data): device_query = self._query_gateway_devices( context, filters={'id': [device['id'] for device in gateway_data['devices']]}) retrieved_device_ids = set() for device in device_query: retrieved_device_ids.add(device['id']) if device['tenant_id'] != tenant_id: raise GatewayDeviceNotFound(device_id=device['id']) missing_device_ids = ( set(device['id'] for device in gateway_data['devices']) - retrieved_device_ids) if missing_device_ids: raise GatewayDevicesNotFound( device_ids=",".join(missing_device_ids)) def create_network_gateway(self, context, network_gateway, validate_device_list=True): gw_data = network_gateway[self.gateway_resource] tenant_id = gw_data['tenant_id'] with db_api.context_manager.writer.using(context): gw_db = nsx_models.NetworkGateway( id=gw_data.get('id', uuidutils.generate_uuid()), tenant_id=tenant_id, name=gw_data.get('name')) # Device list is guaranteed to be a valid list, but some devices # might still either not exist or belong to a different tenant if validate_device_list: self._validate_device_list(context, tenant_id, gw_data) gw_db.devices.extend( [nsx_models.NetworkGatewayDeviceReference(**device) for device in gw_data['devices']]) context.session.add(gw_db) LOG.debug("Created network gateway with id:%s", gw_db['id']) return self._make_network_gateway_dict(gw_db) def update_network_gateway(self, context, id, network_gateway): gw_data = network_gateway[self.gateway_resource] with db_api.context_manager.writer.using(context): gw_db = 
self._get_network_gateway(context, id) if gw_db.default: raise NetworkGatewayUnchangeable(gateway_id=id) # Ensure there is something to update before doing it if any([gw_db[k] != gw_data[k] for k in gw_data]): gw_db.update(gw_data) LOG.debug("Updated network gateway with id:%s", id) return self._make_network_gateway_dict(gw_db) def get_network_gateway(self, context, id, fields=None): gw_db = self._get_network_gateway(context, id) return self._make_network_gateway_dict(gw_db, fields) def delete_network_gateway(self, context, id): with db_api.context_manager.writer.using(context): gw_db = self._get_network_gateway(context, id) if gw_db.network_connections: raise GatewayInUse(gateway_id=id) if gw_db.default: raise NetworkGatewayUnchangeable(gateway_id=id) context.session.delete(gw_db) LOG.debug("Network gateway '%s' was destroyed.", id) def get_network_gateways(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = db_utils.get_marker_obj(self, context, 'network_gateway', limit, marker) return model_query.get_collection(context, nsx_models.NetworkGateway, self._make_network_gateway_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def connect_network(self, context, network_gateway_id, network_mapping_info): network_id = self._validate_network_mapping_info(network_mapping_info) LOG.debug("Connecting network '%(network_id)s' to gateway " "'%(network_gateway_id)s'", {'network_id': network_id, 'network_gateway_id': network_gateway_id}) with db_api.context_manager.writer.using(context): gw_db = self._get_network_gateway(context, network_gateway_id) tenant_id = gw_db['tenant_id'] if context.is_admin and not tenant_id: tenant_id = context.tenant_id # TODO(salvatore-orlando): Leverage unique constraint instead # of performing another query! 
if self._retrieve_gateway_connections(context, network_gateway_id, network_mapping_info): raise GatewayConnectionInUse(mapping=network_mapping_info, gateway_id=network_gateway_id) # TODO(salvatore-orlando): Creating a port will give it an IP, # but we actually do not need any. Instead of wasting an IP we # should have a way to say a port shall not be associated with # any subnet try: # We pass the segmentation type and id too - the plugin # might find them useful as the network connection object # does not exist yet. # NOTE: they're not extended attributes, rather extra data # passed in the port structure to the plugin # TODO(salvatore-orlando): Verify optimal solution for # ownership of the gateway port port = self.create_port(context, { 'port': {'tenant_id': tenant_id, 'network_id': network_id, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'fixed_ips': [], 'device_id': network_gateway_id, 'device_owner': DEVICE_OWNER_NET_GW_INTF, 'name': '', 'gw:segmentation_type': network_mapping_info.get('segmentation_type'), 'gw:segmentation_id': network_mapping_info.get('segmentation_id')}}) except exceptions.NetworkNotFound: err_msg = (_("Requested network '%(network_id)s' not found." 
"Unable to create network connection on " "gateway '%(network_gateway_id)s") % {'network_id': network_id, 'network_gateway_id': network_gateway_id}) LOG.error(err_msg) raise exceptions.InvalidInput(error_message=err_msg) port_id = port['id'] LOG.debug("Gateway port for '%(network_gateway_id)s' " "created on network '%(network_id)s':%(port_id)s", {'network_gateway_id': network_gateway_id, 'network_id': network_id, 'port_id': port_id}) # Create NetworkConnection record network_mapping_info['port_id'] = port_id network_mapping_info['tenant_id'] = tenant_id gw_db.network_connections.append( nsx_models.NetworkConnection(**network_mapping_info)) port_id = port['id'] # now deallocate and recycle ip from the port for fixed_ip in port.get('fixed_ips', []): self._delete_ip_allocation(context, network_id, fixed_ip['subnet_id'], fixed_ip['ip_address']) LOG.debug("Ensured no Ip addresses are configured on port %s", port_id) return {'connection_info': {'network_gateway_id': network_gateway_id, 'network_id': network_id, 'port_id': port_id}} def disconnect_network(self, context, network_gateway_id, network_mapping_info): network_id = self._validate_network_mapping_info(network_mapping_info) LOG.debug("Disconnecting network '%(network_id)s' from gateway " "'%(network_gateway_id)s'", {'network_id': network_id, 'network_gateway_id': network_gateway_id}) with db_api.context_manager.writer.using(context): # Uniquely identify connection, otherwise raise try: net_connection = self._retrieve_gateway_connections( context, network_gateway_id, network_mapping_info, only_one=True) except sa_orm_exc.NoResultFound: raise GatewayConnectionNotFound( network_mapping_info=network_mapping_info, network_gateway_id=network_gateway_id) except sa_orm_exc.MultipleResultsFound: raise MultipleGatewayConnections( gateway_id=network_gateway_id) # Remove gateway port from network # FIXME(salvatore-orlando): Ensure state of port in NSX is # consistent with outcome of transaction self.delete_port(context, 
net_connection['port_id'], nw_gw_port_check=False) # Remove NetworkConnection record context.session.delete(net_connection) def _make_gateway_device_dict(self, gateway_device, fields=None, include_nsx_id=False): res = {'id': gateway_device['id'], 'name': gateway_device['name'], 'status': gateway_device['status'], 'connector_type': gateway_device['connector_type'], 'connector_ip': gateway_device['connector_ip'], 'tenant_id': gateway_device['tenant_id']} if include_nsx_id: # Return the NSX mapping as well. This attribute will not be # returned in the API response anyway. Ensure it will not be # filtered out in field selection. if fields: fields.append('nsx_id') res['nsx_id'] = gateway_device['nsx_id'] return db_utils.resource_fields(res, fields) def _get_gateway_device(self, context, device_id): try: return model_query.get_by_id(context, nsx_models.NetworkGatewayDevice, device_id) except sa_orm_exc.NoResultFound: raise GatewayDeviceNotFound(device_id=device_id) def _is_device_in_use(self, context, device_id): query = model_query.get_collection_query( context, nsx_models.NetworkGatewayDeviceReference, {'id': [device_id]}) return query.first() def get_gateway_device(self, context, device_id, fields=None, include_nsx_id=False): return self._make_gateway_device_dict( self._get_gateway_device(context, device_id), fields, include_nsx_id) def _query_gateway_devices(self, context, filters=None, sorts=None, limit=None, marker=None, page_reverse=None): marker_obj = db_utils.get_marker_obj(self, context, 'gateway_device', limit, marker) return self._get_collection_query(context, nsx_models.NetworkGatewayDevice, filters=filters, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def get_gateway_devices(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False, include_nsx_id=False): query = self._query_gateway_devices(context, filters, sorts, limit, marker, page_reverse) return [self._make_gateway_device_dict(row, 
fields, include_nsx_id) for row in query] def create_gateway_device(self, context, gateway_device, initial_status=STATUS_UNKNOWN): device_data = gateway_device[self.device_resource] tenant_id = device_data['tenant_id'] with db_api.context_manager.writer.using(context): device_db = nsx_models.NetworkGatewayDevice( id=device_data.get('id', uuidutils.generate_uuid()), tenant_id=tenant_id, name=device_data.get('name'), connector_type=device_data['connector_type'], connector_ip=device_data['connector_ip'], status=initial_status) context.session.add(device_db) LOG.debug("Created network gateway device: %s", device_db['id']) return self._make_gateway_device_dict(device_db) def update_gateway_device(self, context, gateway_device_id, gateway_device, include_nsx_id=False): device_data = gateway_device[self.device_resource] with db_api.context_manager.writer.using(context): device_db = self._get_gateway_device(context, gateway_device_id) # Ensure there is something to update before doing it if any([device_db[k] != device_data[k] for k in device_data]): device_db.update(device_data) LOG.debug("Updated network gateway device: %s", gateway_device_id) return self._make_gateway_device_dict( device_db, include_nsx_id=include_nsx_id) def delete_gateway_device(self, context, device_id): with db_api.context_manager.writer.using(context): # A gateway device should not be deleted # if it is used in any network gateway service if self._is_device_in_use(context, device_id): raise GatewayDeviceInUse(device_id=device_id) device_db = self._get_gateway_device(context, device_id) context.session.delete(device_db) LOG.debug("Deleted network gateway device: %s.", device_id) vmware-nsx-12.0.1/vmware_nsx/db/migration/0000775000175100017510000000000013244524600020475 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/0000775000175100017510000000000013244524600024325 5ustar 
zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/0000775000175100017510000000000013244524600026175 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/queens/0000775000175100017510000000000013244524600027475 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/0000775000175100017510000000000013244524600031312 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000017700000000000011222 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/a1be06050b41_update_nsx_binding_types.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/a1be06050b41_u0000666000175100017510000000373713244523345033314 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """update nsx binding types Revision ID: a1be06050b41 Revises: 84ceffa27115 Create Date: 2017-09-04 23:58:22.003350 """ # revision identifiers, used by Alembic. 
revision = 'a1be06050b41' down_revision = '84ceffa27115' depends_on = ('aede17d51d0f') from alembic import op import sqlalchemy as sa from neutron.db import migration as neutron_op all_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', 'vxlan', 'geneve', 'portgroup', 'nsx-net', name='tz_network_bindings_binding_type') new_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', 'geneve', 'portgroup', 'nsx-net', name='tz_network_bindings_binding_type') def upgrade(): # add the new network types to the enum neutron_op.alter_enum_add_value( 'tz_network_bindings', 'binding_type', all_tz_binding_type_enum, False) # change existing entries with type 'vxlan' to 'geneve' op.execute("UPDATE tz_network_bindings SET binding_type='geneve' " "where binding_type='vxlan'") # remove 'vxlan' from the enum op.alter_column( 'tz_network_bindings', 'binding_type', type_=new_tz_binding_type_enum, existing_type=all_tz_binding_type_enum, existing_nullable=False) ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/717f7f63a219_nsxv3_lbaas_l7policy.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/717f7f63a219_n0000666000175100017510000000447513244523413033256 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""nsxv3_lbaas_l7policy Revision ID: 717f7f63a219 Revises: a1be06050b41 Create Date: 2017-10-26 08:32:40.846088 """ # revision identifiers, used by Alembic. revision = '717f7f63a219' down_revision = 'a1be06050b41' from alembic import op import sqlalchemy as sa from neutron.db import migration def upgrade(): if migration.schema_has_table('nsxv3_lbaas_l7rules'): op.drop_constraint('fk_nsxv3_lbaas_l7rules_id', 'nsxv3_lbaas_l7rules', 'foreignkey') op.drop_constraint('l7rule_id', 'nsxv3_lbaas_l7rules', 'primary') op.drop_column('nsxv3_lbaas_l7rules', 'loadbalancer_id') op.drop_column('nsxv3_lbaas_l7rules', 'l7rule_id') op.rename_table('nsxv3_lbaas_l7rules', 'nsxv3_lbaas_l7policies') if migration.schema_has_table('lbaas_l7policies'): op.create_foreign_key( 'fk_nsxv3_lbaas_l7policies_id', 'nsxv3_lbaas_l7policies', 'lbaas_l7policies', ['l7policy_id'], ['id'], ondelete='CASCADE') else: op.create_table( 'nsxv3_lbaas_l7policies', sa.Column('l7policy_id', sa.String(36), nullable=False), sa.Column('lb_rule_id', sa.String(36), nullable=False), sa.Column('lb_vs_id', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('l7policy_id')) if migration.schema_has_table('lbaas_l7policies'): op.create_foreign_key( 'fk_nsxv3_lbaas_l7policies_id', 'nsxv3_lbaas_l7policies', 'lbaas_l7policies', ['l7policy_id'], ['id'], ondelete='CASCADE') vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/0000775000175100017510000000000013244524600030754 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/9799427fc0e1_nsx_tv_map.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/9799427fc0e1_nsx0000666000175100017510000000250613244523413033272 0ustar zuulzuul00000000000000# Copyright 2017 
VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsx map project to plugin Revision ID: 9799427fc0e1 Revises: ea7a72ab9643 Create Date: 2017-06-12 16:59:48.021909 """ # revision identifiers, used by Alembic. revision = '9799427fc0e1' down_revision = 'ea7a72ab9643' from alembic import op import sqlalchemy as sa plugin_type_enum = sa.Enum('dvs', 'nsx-v', 'nsx-t', name='nsx_plugin_type') def upgrade(): op.create_table( 'nsx_project_plugin_mappings', sa.Column('project', sa.String(36), nullable=False), sa.Column('plugin', plugin_type_enum, nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('project')) ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/0dbeda408e41_nsxv3_vpn_mapping.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/0dbeda408e41_nsx0000666000175100017510000000276413244523345033474 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsxv3_vpn_mapping Revision ID: 0dbeda408e41 Revises: 9799427fc0e1 Create Date: 2017-11-26 12:27:40.846088 """ # revision identifiers, used by Alembic. revision = '0dbeda408e41' down_revision = '9799427fc0e1' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'neutron_nsx_vpn_connection_mappings', sa.Column('neutron_id', sa.String(36), nullable=False), sa.Column('session_id', sa.String(36), nullable=False), sa.Column('dpd_profile_id', sa.String(36), nullable=False), sa.Column('ike_profile_id', sa.String(36), nullable=False), sa.Column('ipsec_profile_id', sa.String(36), nullable=False), sa.Column('peer_ep_id', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('neutron_id')) vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/ocata/0000775000175100017510000000000013244524600027264 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/0000775000175100017510000000000013244524600031101 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000017500000000000011220 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/14a89ddf96e2_add_az_internal_network.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/14a89ddf96e2_ad0000666000175100017510000000322113244523345033327 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSX Adds a 'availability_zone' attribute to internal-networks table Revision ID: 14a89ddf96e2 Revises: 5c8f451290b7 Create Date: 2017-02-05 14:34:21.163418 """ # revision identifiers, used by Alembic. revision = '14a89ddf96e2' down_revision = '5c8f451290b7' from alembic import op import sqlalchemy as sa from sqlalchemy.engine import reflection from neutron.db import migration # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.OCATA] def upgrade(): table_name = 'nsxv_internal_networks' # Add the new column op.add_column(table_name, sa.Column( 'availability_zone', sa.String(36), server_default='default')) # replace the old primary key constraint with a new one for both # purpose & az inspector = reflection.Inspector.from_engine(op.get_bind()) pk_constraint = inspector.get_pk_constraint(table_name) op.drop_constraint(pk_constraint.get('name'), table_name, type_='primary') op.create_primary_key(None, table_name, ['network_purpose', 'availability_zone']) ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/5c8f451290b7_nsx_ipam_table_rename.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/5c8f451290b7_ns0000666000175100017510000000174313244523345033223 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsxv_subnet_ipam rename to nsx_subnet_ipam Revision ID: 5c8f451290b7 Revises: d49ac91b560e Create Date: 2016-12-25 11:08:30.300482 """ # revision identifiers, used by Alembic. revision = '5c8f451290b7' down_revision = 'd49ac91b560e' depends_on = ('6e6da8296c0e',) from alembic import op def upgrade(): op.rename_table('nsxv_subnet_ipam', 'nsx_subnet_ipam') vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/0000775000175100017510000000000013244524600030543 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/01a33f93f5fd_nsxv_lbv2_l7pol.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/01a33f93f5fd_nsxv0000666000175100017510000000330613244523345033373 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """nsxv_lbv2_l7policy Revision ID: 01a33f93f5fd Revises: dd9fe5a3a526 Create Date: 2017-01-04 10:10:59.990122 """ # revision identifiers, used by Alembic. revision = '01a33f93f5fd' down_revision = 'dd9fe5a3a526' from alembic import op import sqlalchemy as sa from neutron.db import migration # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.OCATA] def upgrade(): if migration.schema_has_table('lbaas_l7policies'): op.create_table( 'nsxv_lbaas_l7policy_bindings', sa.Column('policy_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('edge_app_rule_id', sa.String(length=36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('policy_id'), sa.ForeignKeyConstraint(['policy_id'], ['lbaas_l7policies.id'], ondelete='CASCADE')) ././@LongLink0000000000000000000000000000017500000000000011220 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/dd9fe5a3a526_nsx_add_certificate_table.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/dd9fe5a3a526_nsx_0000666000175100017510000000251213244523345033426 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""NSX Adds certificate table for client certificate management Revision ID: dd9fe5a3a526 Revises: e816d4fe9d4f Create Date: 2017-01-06 12:30:01.070022 """ # revision identifiers, used by Alembic. revision = 'dd9fe5a3a526' down_revision = 'e816d4fe9d4f' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table('nsx_certificates', sa.Column('purpose', sa.String(length=32), nullable=False), sa.Column('certificate', sa.String(length=9216), nullable=False), sa.Column('private_key', sa.String(length=5120), nullable=False), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('created_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('purpose')) ././@LongLink0000000000000000000000000000020100000000000011206 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/e816d4fe9d4f_nsx_add_policy_security_group.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/e816d4fe9d4f_nsx_0000666000175100017510000000200413244523345033435 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSX Adds a 'policy' attribute to security-group Revision ID: e816d4fe9d4f Revises: 7b5ec3caa9a4 Create Date: 2016-10-06 11:30:31.263918 """ # revision identifiers, used by Alembic. 
revision = 'e816d4fe9d4f' down_revision = '7b5ec3caa9a4' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('nsx_extended_security_group_properties', sa.Column('policy', sa.String(36))) vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/kilo_release.py0000666000175100017510000000153113244523345031214 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """kilo Revision ID: kilo Revises: None Create Date: 2015-04-16 00:00:00.000000 """ # revision identifiers, used by Alembic. 
revision = 'kilo' down_revision = None def upgrade(): """A no-op migration for marking the Kilo release.""" pass vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/EXPAND_HEAD0000666000175100017510000000001513244523345027663 0ustar zuulzuul000000000000000dbeda408e41 vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/liberty/0000775000175100017510000000000013244524600027647 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/0000775000175100017510000000000013244524600031464 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000021400000000000011212 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/393bf843b96_initial_liberty_no_op_contract_script.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/393bf843b96_i0000666000175100017510000000164013244523345033335 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Initial Liberty no-op contract script. Revision ID: 393bf843b96 Revises: kilo Create Date: 2015-08-13 07:26:21.891165 """ from neutron.db.migration import cli # revision identifiers, used by Alembic. 
revision = '393bf843b96' down_revision = 'kilo' branch_labels = (cli.CONTRACT_BRANCH,) def upgrade(): pass ././@LongLink0000000000000000000000000000017500000000000011220 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/3c88bdea3054_nsxv_vdr_dhcp_binding.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/3c88bdea3054_0000666000175100017510000000231013244523345033366 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsxv_vdr_dhcp_binding.py Revision ID: 3c88bdea3054 Revises: 393bf843b96 Create Date: 2015-09-23 14:59:15.102609 """ from alembic import op from neutron.db import migration # revision identifiers, used by Alembic. 
revision = '3c88bdea3054' down_revision = '393bf843b96' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.LIBERTY, migration.MITAKA] def upgrade(): op.drop_constraint('unique_nsxv_vdr_dhcp_bindings0dhcp_router_id', 'nsxv_vdr_dhcp_bindings', 'unique') op.drop_column('nsxv_vdr_dhcp_bindings', 'dhcp_router_id') vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/0000775000175100017510000000000013244524600031126 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000021100000000000011207 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/53a3254aa95e_initial_liberty_no_op_expand_script.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/53a3254aa95e_in0000666000175100017510000000163613244523345033307 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Initial Liberty no-op expand script. Revision ID: 53a3254aa95e Revises: kilo Create Date: 2015-08-13 06:34:29.842396 """ from neutron.db.migration import cli # revision identifiers, used by Alembic. 
revision = '53a3254aa95e' down_revision = 'kilo' branch_labels = (cli.EXPAND_BRANCH,) def upgrade(): pass ././@LongLink0000000000000000000000000000020400000000000011211 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/279b70ac3ae8_nsxv3_add_l2gwconnection_table.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/279b70ac3ae8_ns0000666000175100017510000000273513244523345033406 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """NSXv3 Add l2gwconnection table Revision ID: 279b70ac3ae8 Revises: 28430956782d Create Date: 2015-08-14 02:04:09.807926 """ from alembic import op import sqlalchemy as sa from neutron.db import migration # revision identifiers, used by Alembic. 
revision = '279b70ac3ae8' down_revision = '28430956782d' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.LIBERTY] def upgrade(): op.create_table( 'nsx_l2gw_connection_mappings', sa.Column('connection_id', sa.String(length=36), nullable=False), sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('bridge_endpoint_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('connection_id'), ) ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/28430956782d_nsxv3_security_groups.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/28430956782d_ns0000666000175100017510000000312513244523345033112 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """nsxv3_security_groups Revision ID: 28430956782d Revises: 53a3254aa95e Create Date: 2015-08-24 18:19:09.397813 """ # revision identifiers, used by Alembic. 
revision = '28430956782d' down_revision = '53a3254aa95e' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'neutron_nsx_firewall_section_mappings', sa.Column('neutron_id', sa.String(36), nullable=False), sa.Column('nsx_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint(['neutron_id'], ['securitygroups.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id')) op.create_table( 'neutron_nsx_rule_mappings', sa.Column('neutron_id', sa.String(36), nullable=False), sa.Column('nsx_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint(['neutron_id'], ['securitygrouprules.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id')) vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/0000775000175100017510000000000013244524600027125 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/0000775000175100017510000000000013244524600030742 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000020400000000000011211 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/84ceffa27115_nsxv3_qos_policy_no_foreign_key.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/84ceffa27115_nsx0000666000175100017510000000250613244523345033407 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""remove the foreign key constrain from nsxv3_qos_policy_mapping Revision ID: 84ceffa27115 Revises: 8c0a81a07691 Create Date: 2017-03-15 11:47:09.450116 """ # revision identifiers, used by Alembic. revision = '84ceffa27115' down_revision = '8c0a81a07691' from alembic import op from sqlalchemy.engine import reflection from neutron.db import migration # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.PIKE] def upgrade(): table_name = 'neutron_nsx_qos_policy_mappings' inspector = reflection.Inspector.from_engine(op.get_bind()) fk_constraint = inspector.get_foreign_keys(table_name)[0] op.drop_constraint(fk_constraint.get('name'), table_name, type_='foreignkey') ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/8c0a81a07691_fix_ipam_table.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/8c0a81a07691_fix0000666000175100017510000000253113244523345033217 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Update the primary key constraint of nsx_subnet_ipam Revision ID: 8c0a81a07691 Revises: 14a89ddf96e2 Create Date: 2017-02-15 15:25:21.163418 """ # revision identifiers, used by Alembic. 
revision = '8c0a81a07691' down_revision = '14a89ddf96e2' from alembic import op from sqlalchemy.engine import reflection def upgrade(): table_name = 'nsx_subnet_ipam' # replace the old primary key constraint with a new one for both # subnet and nsx-pool inspector = reflection.Inspector.from_engine(op.get_bind()) pk_constraint = inspector.get_pk_constraint(table_name) op.drop_constraint(pk_constraint.get('name'), table_name, type_='primary') op.create_primary_key(None, table_name, ['subnet_id', 'nsx_pool_id']) vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/0000775000175100017510000000000013244524600030404 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/8699700cd95c_nsxv_bgp_speaker_mapping.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/8699700cd95c_nsxv_0000666000175100017510000000356713244523345033264 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsxv_bgp_speaker_mapping Revision ID: 8699700cd95c Revises: 7c4704ad37df Create Date: 2017-02-16 03:13:39.775670 """ # revision identifiers, used by Alembic. 
revision = '8699700cd95c' down_revision = '7c4704ad37df' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'nsxv_bgp_speaker_bindings', sa.Column('edge_id', sa.String(36), nullable=False), sa.Column('bgp_speaker_id', sa.String(36), nullable=False), sa.Column('bgp_identifier', sa.String(64), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.ForeignKeyConstraint(['bgp_speaker_id'], ['bgp_speakers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('edge_id')) op.create_table( 'nsxv_bgp_peer_edge_bindings', sa.Column('peer_id', sa.String(36), nullable=False), sa.Column('edge_id', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.ForeignKeyConstraint(['peer_id'], ['bgp_peers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('peer_id')) ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/53eb497903a4_drop_vdr_dhcp_bindings.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/53eb497903a4_drop_0000666000175100017510000000162213244523345033205 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Drop VDR DHCP bindings table Revision ID: 53eb497903a4 Revises: 8699700cd95c Create Date: 2017-02-22 10:10:59.990122 """ # revision identifiers, used by Alembic. revision = '53eb497903a4' down_revision = '8699700cd95c' from alembic import op def upgrade(): op.drop_table('nsxv_vdr_dhcp_bindings') ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/e4c503f4133f_port_vnic_type_support.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/e4c503f4133f_port_0000666000175100017510000000260113244523345033272 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Port vnic_type support Revision ID: e4c503f4133f Revises: 01a33f93f5fd Create Date: 2017-02-20 00:05:30.894680 """ # revision identifiers, used by Alembic. 
revision = 'e4c503f4133f' down_revision = '01a33f93f5fd' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'nsxv_port_ext_attributes', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('vnic_type', sa.String(length=64), nullable=False, server_default='normal'), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('created_at', sa.DateTime(), nullable=True), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id')) ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/7c4704ad37df_nsxv_lbv2_l7pol_fix.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/7c4704ad37df_nsxv_0000666000175100017510000000360213244523345033375 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fix NSX Lbaas L7 policy table creation Revision ID: 7c4704ad37df Revises: e4c503f4133f Create Date: 2017-02-22 10:10:59.990122 """ # revision identifiers, used by Alembic. revision = '7c4704ad37df' down_revision = 'e4c503f4133f' from alembic import op import sqlalchemy as sa from neutron.db import migration def upgrade(): # On a previous upgrade this table was created conditionally. 
# It should always be created, and just the ForeignKeyConstraint # should be conditional if not migration.schema_has_table('nsxv_lbaas_l7policy_bindings'): op.create_table( 'nsxv_lbaas_l7policy_bindings', sa.Column('policy_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('edge_app_rule_id', sa.String(length=36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('policy_id')) if migration.schema_has_table('lbaas_l7policies'): op.create_foreign_key( 'fk_lbaas_l7policies_id', 'nsxv_lbaas_l7policy_bindings', 'lbaas_l7policies', ['policy_id'], ['id'], ondelete='CASCADE') ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/ea7a72ab9643_nsxv3_lbaas_mapping.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/ea7a72ab9643_nsxv30000666000175100017510000001140013244523345033312 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from alembic import op import sqlalchemy as sa from neutron.db import migration """nsxv3_lbaas_mapping Revision ID: ea7a72ab9643 Revises: 53eb497903a4 Create Date: 2017-06-12 16:59:48.021909 """ # revision identifiers, used by Alembic. 
revision = 'ea7a72ab9643' down_revision = '53eb497903a4' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.PIKE] def upgrade(): op.create_table( 'nsxv3_lbaas_loadbalancers', sa.Column('loadbalancer_id', sa.String(36), nullable=False), sa.Column('lb_router_id', sa.String(36), nullable=False), sa.Column('lb_service_id', sa.String(36), nullable=False), sa.Column('vip_address', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('loadbalancer_id')) op.create_table( 'nsxv3_lbaas_listeners', sa.Column('loadbalancer_id', sa.String(36), nullable=False), sa.Column('listener_id', sa.String(36), nullable=False), sa.Column('app_profile_id', sa.String(36), nullable=False), sa.Column('lb_vs_id', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('loadbalancer_id', 'listener_id')) op.create_table( 'nsxv3_lbaas_pools', sa.Column('loadbalancer_id', sa.String(36), nullable=False), sa.Column('pool_id', sa.String(36), nullable=False), sa.Column('lb_pool_id', sa.String(36), nullable=False), sa.Column('lb_vs_id', sa.String(36), nullable=True), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('loadbalancer_id', 'pool_id')) op.create_table( 'nsxv3_lbaas_monitors', sa.Column('loadbalancer_id', sa.String(36), nullable=False), sa.Column('pool_id', sa.String(36), nullable=False), sa.Column('hm_id', sa.String(36), nullable=False), sa.Column('lb_monitor_id', sa.String(36), nullable=False), sa.Column('lb_pool_id', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('loadbalancer_id', 'pool_id', 'hm_id')) op.create_table( 'nsxv3_lbaas_l7rules', 
sa.Column('loadbalancer_id', sa.String(36), nullable=False), sa.Column('l7policy_id', sa.String(36), nullable=False), sa.Column('l7rule_id', sa.String(36), nullable=False), sa.Column('lb_rule_id', sa.String(36), nullable=False), sa.Column('lb_vs_id', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('loadbalancer_id', 'l7policy_id', 'l7rule_id')) if migration.schema_has_table('lbaas_loadbalancers'): op.create_foreign_key( 'fk_nsxv3_lbaas_loadbalancers_id', 'nsxv3_lbaas_loadbalancers', 'lbaas_loadbalancers', ['loadbalancer_id'], ['id'], ondelete='CASCADE') if migration.schema_has_table('lbaas_listeners'): op.create_foreign_key( 'fk_nsxv3_lbaas_listeners_id', 'nsxv3_lbaas_listeners', 'lbaas_listeners', ['listener_id'], ['id'], ondelete='CASCADE') if migration.schema_has_table('lbaas_pools'): op.create_foreign_key( 'fk_nsxv3_lbaas_pools_id', 'nsxv3_lbaas_pools', 'lbaas_pools', ['pool_id'], ['id'], ondelete='CASCADE') if migration.schema_has_table('lbaas_healthmonitors'): op.create_foreign_key( 'fk_nsxv3_lbaas_healthmonitors_id', 'nsxv3_lbaas_monitors', 'lbaas_healthmonitors', ['hm_id'], ['id'], ondelete='CASCADE') if migration.schema_has_table('lbaas_l7rules'): op.create_foreign_key( 'fk_nsxv3_lbaas_l7rules_id', 'nsxv3_lbaas_l7rules', 'lbaas_l7rules', ['l7rule_id'], ['id'], ondelete='CASCADE') vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/CONTRACT_HEAD0000666000175100017510000000001513244523345030121 0ustar zuulzuul00000000000000717f7f63a219 vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/0000775000175100017510000000000013244524600027443 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/0000775000175100017510000000000013244524600030722 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000017100000000000011214 Lustar 
00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/4c45bcadccf9_extend_secgroup_rule.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/4c45bcadccf9_ext0000666000175100017510000000247313244523345033600 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """extend_secgroup_rule Revision ID: 4c45bcadccf9 Revises: 20483029f1ff Create Date: 2016-03-01 06:12:09.450116 """ from alembic import op import sqlalchemy as sa from neutron.db import migration # revision identifiers, used by Alembic. revision = '4c45bcadccf9' down_revision = '20483029f1ff' neutron_milestone = [migration.MITAKA] def upgrade(): op.create_table( 'nsxv_extended_security_group_rule_properties', sa.Column('rule_id', sa.String(36), nullable=False), sa.Column('local_ip_prefix', sa.String(255), nullable=False), sa.ForeignKeyConstraint(['rule_id'], ['securitygrouprules.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('rule_id')) ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/312211a5725f_nsxv_lbv2.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/312211a5725f_nsx0000666000175100017510000000577513244523345033145 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """nsxv_lbv2 Revision ID: 312211a5725f Revises: 279b70ac3ae8 Create Date: 2015-09-09 02:02:59.990122 """ # revision identifiers, used by Alembic. revision = '312211a5725f' down_revision = '279b70ac3ae8' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'nsxv_lbaas_loadbalancer_bindings', sa.Column('loadbalancer_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('edge_fw_rule_id', sa.String(length=36), nullable=False), sa.Column('vip_address', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('loadbalancer_id')) op.create_table( 'nsxv_lbaas_listener_bindings', sa.Column('loadbalancer_id', sa.String(length=36), nullable=False), sa.Column('listener_id', sa.String(length=36), nullable=False), sa.Column('app_profile_id', sa.String(length=36), nullable=False), sa.Column('vse_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('loadbalancer_id', 'listener_id')) op.create_table( 'nsxv_lbaas_pool_bindings', sa.Column('loadbalancer_id', sa.String(length=36), nullable=False), sa.Column('listener_id', sa.String(length=36), nullable=False), sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('edge_pool_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('loadbalancer_id', 'listener_id', 'pool_id')) op.create_table( 'nsxv_lbaas_monitor_bindings', sa.Column('loadbalancer_id', 
sa.String(length=36), nullable=False), sa.Column('listener_id', sa.String(length=36), nullable=False), sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('hm_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('edge_mon_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('loadbalancer_id', 'listener_id', 'pool_id', 'hm_id', 'edge_id')) op.create_table( 'nsxv_lbaas_certificate_bindings', sa.Column('cert_id', sa.String(length=128), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('edge_cert_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('cert_id', 'edge_id')) ././@LongLink0000000000000000000000000000020000000000000011205 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/2af850eb3970_update_nsxv_tz_binding_type.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/2af850eb3970_upd0000666000175100017510000000260513244523345033266 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """update nsxv tz binding type Revision ID: 2af850eb3970 Revises: 312211a5725f Create Date: 2015-11-24 13:44:08.664653 """ # revision identifiers, used by Alembic. 
revision = '2af850eb3970' down_revision = '312211a5725f' from alembic import op import sqlalchemy as sa tz_binding_type_enum = sa.Enum('flat', 'vlan', 'portgroup', name='nsxv_tz_network_bindings_binding_type') new_tz_binding_type_enum = sa.Enum( 'flat', 'vlan', 'portgroup', 'vxlan', name='nsxv_tz_network_bindings_binding_type') def upgrade(): op.alter_column( 'nsxv_tz_network_bindings', 'binding_type', type_=new_tz_binding_type_enum, existing_type=tz_binding_type_enum, existing_nullable=False) ././@LongLink0000000000000000000000000000017700000000000011222 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/20483029f1ff_update_tz_network_bindings.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/20483029f1ff_upd0000666000175100017510000000266213244523345033210 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """update nsx_v3 tz_network_bindings_binding_type Revision ID: 20483029f1ff Revises: 69fb78b33d41 Create Date: 2016-02-09 13:57:01.590154 """ # revision identifiers, used by Alembic. 
revision = '20483029f1ff' down_revision = '69fb78b33d41' from alembic import op import sqlalchemy as sa old_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', name='tz_network_bindings_binding_type') new_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', 'vxlan', name='tz_network_bindings_binding_type') def upgrade(): op.alter_column( 'tz_network_bindings', 'binding_type', type_=new_tz_binding_type_enum, existing_type=old_tz_binding_type_enum, existing_nullable=False) ././@LongLink0000000000000000000000000000020600000000000011213 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/69fb78b33d41_nsxv_add_search_domain_to_subnets.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/69fb78b33d41_nsx0000666000175100017510000000243713244523345033320 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSXv add dns search domain to subnets Revision ID: 69fb78b33d41 Revises: 2af850eb3970 Create Date: 2016-01-27 07:28:35.369938 """ # revision identifiers, used by Alembic. 
revision = '69fb78b33d41' down_revision = '2af850eb3970' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'nsxv_subnet_ext_attributes', sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.Column('dns_search_domain', sa.String(length=255), nullable=False), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('subnet_id') ) vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/0000775000175100017510000000000013244524600027507 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/0000775000175100017510000000000013244524600031324 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000020300000000000011210 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/dbe29d208ac6_nsxv_add_dhcp_mtu_to_subnets.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/dbe29d208ac6_n0000666000175100017510000000244513244523345033475 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSXv add DHCP MTU to subnets Revision ID: dbe29d208ac6 Revises: 081af0e396d7 Create Date: 2016-07-21 05:03:35.369938 """ # revision identifiers, used by Alembic. 
revision = 'dbe29d208ac6' down_revision = '081af0e396d7' from alembic import op import sqlalchemy as sa def upgrade(): # Add a new column and make the previous column nullable, # because it is enough that one of them is non-null op.add_column('nsxv_subnet_ext_attributes', sa.Column('dhcp_mtu', sa.Integer, nullable=True)) op.alter_column('nsxv_subnet_ext_attributes', 'dns_search_domain', nullable=True, existing_type=sa.String(length=255), existing_nullable=False) ././@LongLink0000000000000000000000000000020500000000000011212 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/081af0e396d7_nsx_extended_rule_table_rename.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/081af0e396d7_n0000666000175100017510000000176013244523345033340 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsxv3_secgroup_local_ip_prefix Revision ID: 081af0e396d7 Revises: 5ed1ffbc0d2a Create Date: 2016-03-24 07:11:30.300482 """ # revision identifiers, used by Alembic. 
revision = '081af0e396d7' down_revision = '5ed1ffbc0d2a' from alembic import op def upgrade(): op.rename_table('nsxv_extended_security_group_rule_properties', 'nsx_extended_security_group_rule_properties') ././@LongLink0000000000000000000000000000020000000000000011205 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/d49ac91b560e_nsxv_lbaasv2_shared_pools.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/d49ac91b560e_n0000666000175100017510000000343713244523345033422 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Support shared pools with NSXv LBaaSv2 driver Revision ID: d49ac91b560e Revises: dbe29d208ac6 Create Date: 2016-07-21 05:03:35.369938 """ from alembic import op from sqlalchemy.engine import reflection from neutron.db import migration # revision identifiers, used by Alembic. 
revision = 'd49ac91b560e' down_revision = 'dbe29d208ac6' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.NEWTON] def upgrade(): change_pk_constraint('nsxv_lbaas_pool_bindings', ['loadbalancer_id', 'pool_id']) change_pk_constraint('nsxv_lbaas_monitor_bindings', ['loadbalancer_id', 'pool_id', 'hm_id', 'edge_id']) def change_pk_constraint(table_name, columns): inspector = reflection.Inspector.from_engine(op.get_bind()) pk_constraint = inspector.get_pk_constraint(table_name) op.drop_constraint(pk_constraint.get('name'), table_name, type_='primary') op.drop_column(table_name, 'listener_id') op.create_primary_key(None, table_name, columns) ././@LongLink0000000000000000000000000000020100000000000011206 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/5ed1ffbc0d2a_nsx_security_group_logging.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/5ed1ffbc0d2a_n0000666000175100017510000000503613244523345033625 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """nsxv_security_group_logging Revision ID: 5ed1ffbc0d2a Revises: 3e4dccfe6fb4 Create Date: 2016-03-24 06:06:06.680092 """ # revision identifiers, used by Alembic. 
revision = '5ed1ffbc0d2a' down_revision = '3c88bdea3054' depends_on = ('3e4dccfe6fb4',) from alembic import op import sqlalchemy as sa def upgrade(): secgroup_prop_table = sa.Table( 'nsx_extended_security_group_properties', sa.MetaData(), sa.Column('security_group_id', sa.String(36), nullable=False), sa.Column('logging', sa.Boolean(), nullable=False)) op.bulk_insert(secgroup_prop_table, get_values()) op.drop_column('nsxv_security_group_section_mappings', 'logging') def get_values(): values = [] session = sa.orm.Session(bind=op.get_bind()) section_mapping_table = sa.Table('nsxv_security_group_section_mappings', sa.MetaData(), sa.Column('neutron_id', sa.String(36)), sa.Column('logging', sa.Boolean(), nullable=False)) secgroup_table = sa.Table('securitygroups', sa.MetaData(), sa.Column('id', sa.String(36))) # If we run NSX-V plugin then we want the current values for security-group # logging, taken from the section mapping table. for row in session.query(section_mapping_table).all(): values.append({'security_group_id': row.neutron_id, 'logging': row.logging}) # If we run NSX-V3 plugin then previous table is empty, since # security-group logging isn't supported on previous versions, we set the # current value to false (the default). if not values: for row in session.query(secgroup_table).all(): values.append({'security_group_id': row.id, 'logging': False}) session.commit() return values vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/0000775000175100017510000000000013244524600030766 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000017700000000000011222 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/3e4dccfe6fb4_nsx_security_group_logging.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/3e4dccfe6fb4_nsx0000666000175100017510000000246013244523345033654 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSXv add dns search domain to subnets Revision ID: 3e4dccfe6fb4 Revises: 2c87aedb206f Create Date: 2016-03-20 07:28:35.369938 """ # revision identifiers, used by Alembic. revision = '3e4dccfe6fb4' down_revision = '2c87aedb206f' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'nsx_extended_security_group_properties', sa.Column('security_group_id', sa.String(36), nullable=False), sa.Column('logging', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('security_group_id') ) ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/7e46906f8997_lbaas_foreignkeys.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/7e46906f8997_lba0000666000175100017510000000350313244523345033172 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """lbaas foreignkeys Revision ID: 7e46906f8997 Revises: aede17d51d0f Create Date: 2016-04-21 10:45:32.278433 """ # revision identifiers, used by Alembic. revision = '7e46906f8997' down_revision = 'aede17d51d0f' from alembic import op from neutron.db import migration def upgrade(): if migration.schema_has_table('lbaas_loadbalancers'): op.create_foreign_key( 'fk_lbaas_loadbalancers_id', 'nsxv_lbaas_loadbalancer_bindings', 'lbaas_loadbalancers', ['loadbalancer_id'], ['id'], ondelete='CASCADE') if migration.schema_has_table('lbaas_listeners'): op.create_foreign_key( 'fk_lbaas_listeners_id', 'nsxv_lbaas_listener_bindings', 'lbaas_listeners', ['listener_id'], ['id'], ondelete='CASCADE') if migration.schema_has_table('lbaas_pools'): op.create_foreign_key( 'fk_lbaas_pools_id', 'nsxv_lbaas_pool_bindings', 'lbaas_pools', ['pool_id'], ['id'], ondelete='CASCADE') if migration.schema_has_table('lbaas_healthmonitors'): op.create_foreign_key( 'fk_lbaas_healthmonitors_id', 'nsxv_lbaas_monitor_bindings', 'lbaas_healthmonitors', ['hm_id'], ['id'], ondelete='CASCADE') ././@LongLink0000000000000000000000000000020200000000000011207 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/967462f585e1_add_dvs_id_to_switch_mappings.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/967462f585e1_add0000666000175100017510000000201313244523345033143 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add dvs_id column to neutron_nsx_network_mappings Revision ID: 967462f585e1 Revises: 3e4dccfe6fb4 Create Date: 2016-02-23 18:22:01.998540 """ # revision identifiers, used by Alembic. revision = '967462f585e1' down_revision = '3e4dccfe6fb4' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('neutron_nsx_network_mappings', sa.Column('dvs_id', sa.String(36), nullable=True)) ././@LongLink0000000000000000000000000000017500000000000011220 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/b7f41687cbad_nsxv3_qos_policy_mapping.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/b7f41687cbad_nsx0000666000175100017510000000236313244523345033516 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""nsxv3_qos_policy_mapping Revision ID: b7f41687cbad Revises: 967462f585e1 Create Date: 2016-03-17 06:12:09.450116 """ # revision identifiers, used by Alembic. revision = 'b7f41687cbad' down_revision = '967462f585e1' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'neutron_nsx_qos_policy_mappings', sa.Column('qos_policy_id', sa.String(36), nullable=False), sa.Column('switch_profile_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint(['qos_policy_id'], ['qos_policies.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('qos_policy_id')) ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/aede17d51d0f_timestamps.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/aede17d51d0f_tim0000666000175100017510000000476713244523345033561 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add timestamp Revision ID: aede17d51d0f Revises: 5e564e781d77 Create Date: 2016-04-21 10:45:32.278433 """ # revision identifiers, used by Alembic. 
revision = 'aede17d51d0f' down_revision = '5e564e781d77' from alembic import op import sqlalchemy as sa tables = [ 'nsxv_router_bindings', 'nsxv_edge_vnic_bindings', 'nsxv_edge_dhcp_static_bindings', 'nsxv_internal_networks', 'nsxv_internal_edges', 'nsxv_security_group_section_mappings', 'nsxv_rule_mappings', 'nsxv_port_vnic_mappings', 'nsxv_router_ext_attributes', 'nsxv_tz_network_bindings', 'nsxv_port_index_mappings', 'nsxv_firewall_rule_bindings', 'nsxv_spoofguard_policy_network_mappings', 'nsxv_vdr_dhcp_bindings', 'nsxv_lbaas_loadbalancer_bindings', 'nsxv_lbaas_listener_bindings', 'nsxv_lbaas_pool_bindings', 'nsxv_lbaas_monitor_bindings', 'nsxv_lbaas_certificate_bindings', 'nsxv_subnet_ext_attributes', 'tz_network_bindings', 'neutron_nsx_network_mappings', 'neutron_nsx_security_group_mappings', 'neutron_nsx_firewall_section_mappings', 'neutron_nsx_rule_mappings', 'neutron_nsx_port_mappings', 'neutron_nsx_router_mappings', 'neutron_nsx_service_bindings', 'neutron_nsx_dhcp_bindings', 'multi_provider_networks', 'networkconnections', 'networkgatewaydevicereferences', 'networkgatewaydevices', 'networkgateways', 'maclearningstates', 'lsn_port', 'lsn', 'qosqueues', 'portqueuemappings', 'networkqueuemappings', 'nsx_l2gw_connection_mappings', 'neutron_nsx_qos_policy_mappings', 'vcns_router_bindings'] def upgrade(): for table in tables: op.add_column( table, sa.Column(u'created_at', sa.DateTime(), nullable=True) ) op.add_column( table, sa.Column(u'updated_at', sa.DateTime(), nullable=True) ) ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/5e564e781d77_add_nsx_binding_type.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/5e564e781d77_add0000666000175100017510000000270113244523345033227 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add nsx binding type Revision ID: 5e564e781d77 Revises: c644ec62c585 Create Date: 2016-06-27 23:58:22.003350 """ # revision identifiers, used by Alembic. revision = '5e564e781d77' down_revision = 'c644ec62c585' from alembic import op import sqlalchemy as sa tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', 'vxlan', name='tz_network_bindings_binding_type') new_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', 'vxlan', 'portgroup', name='tz_network_bindings_binding_type') def upgrade(): op.alter_column( 'tz_network_bindings', 'binding_type', type_=new_tz_binding_type_enum, existing_type=tz_binding_type_enum, existing_nullable=False) ././@LongLink0000000000000000000000000000020000000000000011205 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/1b4eaffe4f31_nsx_provider_security_group.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/1b4eaffe4f31_nsx0000666000175100017510000000213513244523345033564 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSX Adds a 'provider' attribute to security-group Revision ID: 1b4eaffe4f31 Revises: 633514d94b93 Create Date: 2016-07-17 11:30:31.263918 """ # revision identifiers, used by Alembic. revision = '1b4eaffe4f31' down_revision = '633514d94b93' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('nsx_extended_security_group_properties', sa.Column('provider', sa.Boolean(), default=False, server_default=sa.false(), nullable=False)) ././@LongLink0000000000000000000000000000020000000000000011205 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/2c87aedb206f_nsxv_security_group_logging.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/2c87aedb206f_nsx0000666000175100017510000000201013244523345033476 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """nsxv_security_group_logging Revision ID: 2c87aedb206f Revises: 4c45bcadccf9 Create Date: 2016-03-15 06:06:06.680092 """ # revision identifiers, used by Alembic. 
revision = '2c87aedb206f' down_revision = '4c45bcadccf9' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('nsxv_security_group_section_mappings', sa.Column('logging', sa.Boolean(), nullable=False)) ././@LongLink0000000000000000000000000000021500000000000011213 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c288bb6a7252_nsxv_add_resource_pool_to_router_mapping.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c288bb6a7252_nsx0000666000175100017510000000224013244523345033345 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSXv add resource pool to the router bindings table Revision ID: c288bb6a7252 Revises: b7f41687cbad Create Date: 2016-05-15 06:12:09.450116 """ # revision identifiers, used by Alembic. 
revision = 'c288bb6a7252' down_revision = 'b7f41687cbad' from alembic import op from oslo_config import cfg import sqlalchemy as sa from vmware_nsx.common import config # noqa def upgrade(): op.add_column('nsxv_router_bindings', sa.Column('resource_pool', sa.String(36), nullable=True, server_default=cfg.CONF.nsxv.resource_pool_id)) ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/6e6da8296c0e_add_nsxv_ipam.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/6e6da8296c0e_add0000666000175100017510000000231113244523345033353 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Add support for IPAM in NSXv Revision ID: 6e6da8296c0e Revises: 1b4eaffe4f31 Create Date: 2016-09-01 10:17:16.770021 """ revision = '6e6da8296c0e' down_revision = '1b4eaffe4f31' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'nsxv_subnet_ipam', sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.Column('nsx_pool_id', sa.String(length=36), nullable=False), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('created_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('nsx_pool_id'), ) ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/633514d94b93_add_support_for_taas.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/633514d94b93_add0000666000175100017510000000216213244523345033135 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Add support for TaaS Revision ID: 633514d94b93 Revises: 86a55205337c Create Date: 2016-05-09 14:11:31.940021 """ revision = '633514d94b93' down_revision = '86a55205337c' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'nsx_port_mirror_session_mappings', sa.Column('tap_flow_id', sa.String(length=36), nullable=False), sa.Column('port_mirror_session_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('tap_flow_id'), ) ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/7b5ec3caa9a4_nsxv_fix_az_default.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/7b5ec3caa9a4_nsx0000666000175100017510000000232113244523345033561 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fix the availability zones default value in the router bindings table Revision ID: 7b5ec3caa9a4 Revises: 6e6da8296c0e Create Date: 2016-09-07 11:38:35.369938 """ from alembic import op from neutron.db import migration # revision identifiers, used by Alembic. 
revision = '7b5ec3caa9a4' down_revision = '6e6da8296c0e' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.NEWTON] def upgrade(): #previous migration left this column empty instead of 'default' op.execute("UPDATE nsxv_router_bindings SET availability_zone='default' " "where availability_zone is NULL") ././@LongLink0000000000000000000000000000021200000000000011210 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/86a55205337c_nsxv_availability_zone_router_mapping.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/86a55205337c_nsx0000666000175100017510000000236513244523345033215 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSXv add availability zone to the router bindings table instead of the resource pool column Revision ID: 86a55205337c Revises: 7e46906f8997 Create Date: 2016-07-12 09:18:44.450116 """ # revision identifiers, used by Alembic. 
revision = '86a55205337c' down_revision = '7e46906f8997' from alembic import op import sqlalchemy as sa from vmware_nsx.common import config # noqa def upgrade(): op.alter_column('nsxv_router_bindings', 'resource_pool', new_column_name='availability_zone', existing_type=sa.String(36), existing_nullable=True, existing_server_default='default') ././@LongLink0000000000000000000000000000020600000000000011213 Lustar 00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c644ec62c585_nsxv3_add_nsx_dhcp_service_tables.pyvmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c644ec62c585_nsx0000666000175100017510000000412413244523345033356 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSXv3 add nsx_service_bindings and nsx_dhcp_bindings tables Revision ID: c644ec62c585 Revises: c288bb6a7252 Create Date: 2016-04-29 23:19:39.523196 """ # revision identifiers, used by Alembic. 
revision = 'c644ec62c585' down_revision = 'c288bb6a7252' from alembic import op import sqlalchemy as sa from vmware_nsxlib.v3 import nsx_constants nsx_service_type_enum = sa.Enum( nsx_constants.SERVICE_DHCP, name='neutron_nsx_service_bindings_service_type') def upgrade(): op.create_table( 'neutron_nsx_service_bindings', sa.Column('network_id', sa.String(36), nullable=False), sa.Column('port_id', sa.String(36), nullable=True), sa.Column('nsx_service_type', nsx_service_type_enum, nullable=False), sa.Column('nsx_service_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id', 'nsx_service_type')) op.create_table( 'neutron_nsx_dhcp_bindings', sa.Column('port_id', sa.String(36), nullable=False), sa.Column('subnet_id', sa.String(36), nullable=False), sa.Column('ip_address', sa.String(64), nullable=False), sa.Column('nsx_service_id', sa.String(36), nullable=False), sa.Column('nsx_binding_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id', 'nsx_binding_id')) vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/script.py.mako0000666000175100017510000000201413244523345027135 0ustar zuulzuul00000000000000# Copyright ${create_date.year} VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} % if branch_labels: branch_labels = ${repr(branch_labels)} %endif from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/env.py0000666000175100017510000000667713244523345025516 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging import config as logging_config from alembic import context from neutron_lib.db import model_base from oslo_config import cfg from oslo_db.sqlalchemy import session import sqlalchemy as sa from sqlalchemy import event from neutron.db.migration.alembic_migrations import external from neutron.db.migration.models import head # noqa from vmware_nsx.db.migration import alembic_migrations MYSQL_ENGINE = None # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config neutron_config = config.neutron_config # Interpret the config file for Python logging. # This line sets up loggers basically. 
logging_config.fileConfig(config.config_file_name) # set the target for 'autogenerate' support target_metadata = model_base.BASEV2.metadata def set_mysql_engine(): try: mysql_engine = neutron_config.command.mysql_engine except cfg.NoSuchOptError: mysql_engine = None global MYSQL_ENGINE MYSQL_ENGINE = (mysql_engine or model_base.BASEV2.__table_args__['mysql_engine']) def include_object(object, name, type_, reflected, compare_to): if (type_ == 'table' and name in set(external.TABLES) - set(external.REPO_VMWARE_TABLES)): return False else: return True def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with either a URL or an Engine. Calls to context.execute() here emit the given string to the script output. """ set_mysql_engine() kwargs = dict() if neutron_config.database.connection: kwargs['url'] = neutron_config.database.connection else: kwargs['dialect_name'] = neutron_config.database.engine kwargs['include_object'] = include_object kwargs['version_table'] = alembic_migrations.VERSION_TABLE context.configure(**kwargs) with context.begin_transaction(): context.run_migrations() @event.listens_for(sa.Table, 'after_parent_attach') def set_storage_engine(target, parent): if MYSQL_ENGINE: target.kwargs['mysql_engine'] = MYSQL_ENGINE def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. 
""" set_mysql_engine() engine = session.create_engine(neutron_config.database.connection) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata, include_object=include_object, version_table=alembic_migrations.VERSION_TABLE ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() engine.dispose() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() vmware-nsx-12.0.1/vmware_nsx/db/migration/alembic_migrations/__init__.py0000666000175100017510000000120413244523345026442 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. VERSION_TABLE = 'vmware_alembic_version' vmware-nsx-12.0.1/vmware_nsx/db/migration/models/0000775000175100017510000000000013244524600021760 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/models/__init__.py0000666000175100017510000000000013244523345024066 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/migration/models/head.py0000666000175100017510000000171513244523345023246 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.db.migration.models import head from vmware_nsx.db import extended_security_group # noqa from vmware_nsx.db import extended_security_group_rule # noqa from vmware_nsx.db import nsx_models # noqa from vmware_nsx.db import nsxv_models # noqa from vmware_nsx.db import vcns_models # noqa def get_metadata(): return head.model_base.BASEV2.metadata vmware-nsx-12.0.1/vmware_nsx/db/migration/__init__.py0000666000175100017510000000000013244523345022603 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/qos_db.py0000666000175100017510000002711113244523345020336 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from sqlalchemy.orm import exc from neutron.db import _model_query as model_query from neutron.db import _resource_extend as resource_extend from neutron.db import _utils as db_utils from neutron.db import api as db_api from neutron.db import models_v2 from neutron_lib.api.definitions import network as net_def from neutron_lib.api.definitions import port as port_def from oslo_log import log from oslo_utils import uuidutils from vmware_nsx.db import nsx_models from vmware_nsx.extensions import qos_queue as qos LOG = log.getLogger(__name__) @resource_extend.has_resource_extenders class QoSDbMixin(qos.QueuePluginBase): """Mixin class to add queues.""" def create_qos_queue(self, context, qos_queue): q = qos_queue['qos_queue'] with db_api.context_manager.writer.using(context): qos_queue = nsx_models.QoSQueue( id=q.get('id', uuidutils.generate_uuid()), name=q.get('name'), tenant_id=q['tenant_id'], default=q.get('default'), min=q.get('min'), max=q.get('max'), qos_marking=q.get('qos_marking'), dscp=q.get('dscp')) context.session.add(qos_queue) return self._make_qos_queue_dict(qos_queue) def get_qos_queue(self, context, queue_id, fields=None): return self._make_qos_queue_dict( self._get_qos_queue(context, queue_id), fields) def _get_qos_queue(self, context, queue_id): try: return model_query.get_by_id(context, nsx_models.QoSQueue, queue_id) except exc.NoResultFound: raise qos.QueueNotFound(id=queue_id) def get_qos_queues(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = db_utils.get_marker_obj(self, context, 'qos_queue', limit, marker) return model_query.get_collection(context, nsx_models.QoSQueue, self._make_qos_queue_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def delete_qos_queue(self, context, queue_id): with db_api.context_manager.writer.using(context): qos_queue = self._get_qos_queue(context, queue_id) context.session.delete(qos_queue) 
def _process_port_queue_mapping(self, context, port_data, queue_id): port_data[qos.QUEUE] = queue_id if not queue_id: return with db_api.context_manager.writer.using(context): context.session.add(nsx_models.PortQueueMapping( port_id=port_data['id'], queue_id=queue_id)) def _get_port_queue_bindings(self, context, filters=None, fields=None): return model_query.get_collection(context, nsx_models.PortQueueMapping, self._make_port_queue_binding_dict, filters=filters, fields=fields) def _delete_port_queue_mapping(self, context, port_id): query = model_query.query_with_hooks(context, nsx_models.PortQueueMapping) try: binding = query.filter( nsx_models.PortQueueMapping.port_id == port_id).one() except exc.NoResultFound: # return since this can happen if we are updating a port that # did not already have a queue on it. There is no need to check # if there is one before deleting if we return here. return with db_api.context_manager.writer.using(context): context.session.delete(binding) def _process_network_queue_mapping(self, context, net_data, queue_id): net_data[qos.QUEUE] = queue_id if not queue_id: return with db_api.context_manager.writer.using(context): context.session.add( nsx_models.NetworkQueueMapping(network_id=net_data['id'], queue_id=queue_id)) def _get_network_queue_bindings(self, context, filters=None, fields=None): return model_query.get_collection( context, nsx_models.NetworkQueueMapping, self._make_network_queue_binding_dict, filters=filters, fields=fields) def _delete_network_queue_mapping(self, context, network_id): query = self._model_query(context, nsx_models.NetworkQueueMapping) with db_api.context_manager.writer.using(context): binding = query.filter_by(network_id=network_id).first() if binding: context.session.delete(binding) @staticmethod @resource_extend.extends([net_def.COLLECTION_NAME]) @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_dict_qos_queue(obj_res, obj_db): queue_mapping = obj_db['qos_queue'] if queue_mapping: 
obj_res[qos.QUEUE] = queue_mapping.get('queue_id') return obj_res def _make_qos_queue_dict(self, queue, fields=None): res = {'id': queue['id'], 'name': queue.get('name'), 'default': queue.get('default'), 'tenant_id': queue['tenant_id'], 'min': queue.get('min'), 'max': queue.get('max'), 'qos_marking': queue.get('qos_marking'), 'dscp': queue.get('dscp')} return db_utils.resource_fields(res, fields) def _make_port_queue_binding_dict(self, queue, fields=None): res = {'port_id': queue['port_id'], 'queue_id': queue['queue_id']} return db_utils.resource_fields(res, fields) def _make_network_queue_binding_dict(self, queue, fields=None): res = {'network_id': queue['network_id'], 'queue_id': queue['queue_id']} return db_utils.resource_fields(res, fields) def _check_for_queue_and_create(self, context, port): """Check for queue and create. This function determines if a port should be associated with a queue. It works by first querying NetworkQueueMapping to determine if the network is associated with a queue. If so, then it queries NetworkQueueMapping for all the networks that are associated with this queue. Next, it queries against all the ports on these networks with the port device_id. Finally it queries PortQueueMapping. If that query returns a queue_id that is returned. Otherwise a queue is created that is the size of the queue associated with the network and that queue_id is returned. If the network is not associated with a queue we then query to see if there is a default queue in the system. If so, a copy of that is created and the queue_id is returned. Otherwise None is returned. None is also returned if the port does not have a device_id or if the device_owner is network: """ queue_to_create = None # If there is no device_id don't create a queue. The queue will be # created on update port when the device_id is present. Also don't # apply QoS to network ports. 
if (not port.get('device_id') or port['device_owner'].startswith('network:')): return # Check if there is a queue associated with the network filters = {'network_id': [port['network_id']]} network_queue_id = self._get_network_queue_bindings( context, filters, ['queue_id']) if network_queue_id: # get networks that queue is associated with filters = {'queue_id': [network_queue_id[0]['queue_id']]} networks_with_same_queue = self._get_network_queue_bindings( context, filters) # get the ports on these networks with the same_queue and device_id filters = {'device_id': [port.get('device_id')], 'network_id': [network['network_id'] for network in networks_with_same_queue]} query = model_query.query_with_hooks(context, models_v2.Port.id) model_query.apply_filters(query, models_v2.Port, filters, context) ports_ids = [p[0] for p in query] if ports_ids: # shared queue already exists find the queue id queues = self._get_port_queue_bindings(context, {'port_id': ports_ids}, ['queue_id']) if queues: return queues[0]['queue_id'] # get the size of the queue we want to create queue_to_create = self.get_qos_queue( context, network_queue_id[0]['queue_id']) else: # check for default queue filters = {'default': [True]} # context is elevated since default queue is owned by admin queue_to_create = self.get_qos_queues(context.elevated(), filters) if not queue_to_create: return queue_to_create = queue_to_create[0] # create the queue tenant_id = port['tenant_id'] if port.get(qos.RXTX_FACTOR) and queue_to_create.get('max'): queue_to_create['max'] = int(queue_to_create['max'] * port[qos.RXTX_FACTOR]) queue = {'qos_queue': {'name': queue_to_create.get('name'), 'min': queue_to_create.get('min'), 'max': queue_to_create.get('max'), 'dscp': queue_to_create.get('dscp'), 'qos_marking': queue_to_create.get('qos_marking'), 'tenant_id': tenant_id}} return self.create_qos_queue(context, queue, False)['id'] def _validate_qos_queue(self, context, qos_queue): if qos_queue.get('default'): if context.is_admin: 
if self.get_qos_queues(context, filters={'default': [True]}): raise qos.DefaultQueueAlreadyExists() else: raise qos.DefaultQueueCreateNotAdmin() if qos_queue.get('qos_marking') == 'trusted': dscp = qos_queue.pop('dscp') if dscp: # must raise because a non-zero dscp was provided raise qos.QueueInvalidMarking() LOG.info("DSCP value (%s) will be ignored with 'trusted' " "marking", dscp) max = qos_queue.get('max') min = qos_queue.get('min') # Max can be None if max and min > max: raise qos.QueueMinGreaterMax() vmware-nsx-12.0.1/vmware_nsx/db/vnic_index_db.py0000666000175100017510000001037413244523345021665 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.orm import exc from neutron.db import _resource_extend as resource_extend from neutron_lib.api.definitions import port as port_def from oslo_db import exception as db_exc from oslo_log import log as logging from vmware_nsx.db import nsxv_models from vmware_nsx.extensions import vnicindex as vnicidx LOG = logging.getLogger(__name__) @resource_extend.has_resource_extenders class VnicIndexDbMixin(object): @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_vnic_index_binding(port_res, port_db): state = port_db.vnic_index port_res[vnicidx.VNIC_INDEX] = state.index if state else None def _get_port_vnic_index(self, context, port_id): """Returns the vnic index for the given port. 
If the port is not associated with any vnic then return None """ session = context.session try: mapping = (session.query(nsxv_models.NsxvPortIndexMapping). filter_by(port_id=port_id).one()) return mapping['index'] except exc.NoResultFound: LOG.debug("No record in DB for vnic-index of port %s", port_id) def _get_mappings_for_device_id(self, context, device_id): session = context.session mappings = (session.query(nsxv_models.NsxvPortIndexMapping). filter_by(device_id=device_id)) return mappings def _create_port_vnic_index_mapping(self, context, port_id, device_id, index): """Save the port vnic-index to DB.""" session = context.session with session.begin(subtransactions=True): index_mapping_model = nsxv_models.NsxvPortIndexMapping( port_id=port_id, device_id=device_id, index=index) session.add(index_mapping_model) def _update_port_vnic_index_mapping(self, context, port_id, device_id, index): session = context.session # delete original entry query = (session.query(nsxv_models.NsxvPortIndexMapping). filter_by(device_id=device_id, index=index)) query.delete() # create a new one self._create_port_vnic_index_mapping(context, port_id, device_id, index) def _set_port_vnic_index_mapping(self, context, port_id, device_id, index): """Save the port vnic-index to DB.""" try: self._create_port_vnic_index_mapping(context, port_id, device_id, index) except db_exc.DBDuplicateEntry: # A retry for the nova scheduling could result in this error. LOG.debug("Entry already exists for %s %s %s", port_id, device_id, index) mappings = self._get_mappings_for_device_id(context, device_id) for mapping in mappings: if (mapping['port_id'] != port_id and mapping['index'] == index): # a new port is using this device - update! 
self._update_port_vnic_index_mapping(context, port_id, device_id, index) return if (mapping['port_id'] == port_id and mapping['index'] != index): raise def _delete_port_vnic_index_mapping(self, context, port_id): """Delete the port vnic-index association.""" session = context.session query = (session.query(nsxv_models.NsxvPortIndexMapping). filter_by(port_id=port_id)) query.delete() vmware-nsx-12.0.1/vmware_nsx/db/extended_security_group.py0000666000175100017510000004251013244523345024032 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc from sqlalchemy import sql from neutron.db import _resource_extend as resource_extend from neutron.db import api as db_api from neutron.db.models import securitygroup as securitygroups_db from neutron.extensions import securitygroup as ext_sg from neutron.objects import securitygroup as sg_obj from neutron_lib.api.definitions import port as port_def from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as n_constants from neutron_lib.db import model_base from neutron_lib.utils import helpers from neutron_lib.utils import net as n_utils from vmware_nsx.extensions import providersecuritygroup as provider_sg from vmware_nsx.extensions import securitygrouplogging as sg_logging from vmware_nsx.extensions import securitygrouppolicy as sg_policy LOG = logging.getLogger(__name__) class NsxExtendedSecurityGroupProperties(model_base.BASEV2): __tablename__ = 'nsx_extended_security_group_properties' security_group_id = sa.Column(sa.String(36), sa.ForeignKey('securitygroups.id', ondelete="CASCADE"), primary_key=True) logging = sa.Column(sa.Boolean, default=False, nullable=False) provider = sa.Column(sa.Boolean, default=False, server_default=sql.false(), nullable=False) policy = sa.Column(sa.String(36)) security_group = orm.relationship( securitygroups_db.SecurityGroup, backref=orm.backref('ext_properties', lazy='joined', uselist=False, cascade='delete')) @resource_extend.has_resource_extenders class ExtendedSecurityGroupPropertiesMixin(object): # NOTE(arosen): here we add a relationship so that from the ports model # it provides us access to SecurityGroupPortBinding and # NsxExtendedSecurityGroupProperties securitygroups_db.SecurityGroupPortBinding.extended_grp = orm.relationship( 
'NsxExtendedSecurityGroupProperties', foreign_keys="SecurityGroupPortBinding.security_group_id", primaryjoin=("NsxExtendedSecurityGroupProperties.security_group_id" "==SecurityGroupPortBinding.security_group_id")) def create_provider_security_group(self, context, security_group): return self.create_security_group_without_rules( context, security_group, False, True) def create_security_group_without_rules(self, context, security_group, default_sg, is_provider): """Create a neutron security group, without any default rules. This method creates a security group that does not by default enable egress traffic which normal neutron security groups do. """ s = security_group['security_group'] kwargs = { 'context': context, 'security_group': s, 'is_default': default_sg, } self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE, exc_cls=ext_sg.SecurityGroupConflict, **kwargs) tenant_id = s['tenant_id'] if not default_sg: self._ensure_default_security_group(context, tenant_id) with db_api.context_manager.writer.using(context): sg = sg_obj.SecurityGroup( context, id=s.get('id') or uuidutils.generate_uuid(), description=s.get('description', ''), project_id=tenant_id, name=s.get('name', ''), is_default=default_sg) sg.create() secgroup_dict = self._make_security_group_dict(sg) secgroup_dict[sg_policy.POLICY] = s.get(sg_policy.POLICY) secgroup_dict[provider_sg.PROVIDER] = is_provider kwargs['security_group'] = secgroup_dict registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self, **kwargs) return secgroup_dict def _process_security_group_properties_create(self, context, sg_res, sg_req, default_sg=False): self._validate_security_group_properties_create( context, sg_req, default_sg) with db_api.context_manager.writer.using(context): properties = NsxExtendedSecurityGroupProperties( security_group_id=sg_res['id'], logging=sg_req.get(sg_logging.LOGGING, False), provider=sg_req.get(provider_sg.PROVIDER, False), policy=sg_req.get(sg_policy.POLICY)) 
context.session.add(properties) sg_res[sg_logging.LOGGING] = sg_req.get(sg_logging.LOGGING, False) sg_res[provider_sg.PROVIDER] = sg_req.get(provider_sg.PROVIDER, False) sg_res[sg_policy.POLICY] = sg_req.get(sg_policy.POLICY) def _get_security_group_properties(self, context, security_group_id): with db_api.context_manager.reader.using(context): try: prop = context.session.query( NsxExtendedSecurityGroupProperties).filter_by( security_group_id=security_group_id).one() except exc.NoResultFound: raise ext_sg.SecurityGroupNotFound(id=security_group_id) return prop def _process_security_group_properties_update(self, context, sg_res, sg_req): if ((sg_logging.LOGGING in sg_req and (sg_req[sg_logging.LOGGING] != sg_res.get(sg_logging.LOGGING, False))) or (sg_policy.POLICY in sg_req and (sg_req[sg_policy.POLICY] != sg_res.get(sg_policy.POLICY)))): prop = self._get_security_group_properties(context, sg_res['id']) with db_api.context_manager.writer.using(context): prop.update({ sg_logging.LOGGING: sg_req.get(sg_logging.LOGGING, False), sg_policy.POLICY: sg_req.get(sg_policy.POLICY)}) sg_res[sg_logging.LOGGING] = sg_req.get(sg_logging.LOGGING, False) sg_res[sg_policy.POLICY] = sg_req.get(sg_policy.POLICY) def _is_security_group_logged(self, context, security_group_id): prop = self._get_security_group_properties(context, security_group_id) return prop.logging def _is_provider_security_group(self, context, security_group_id): sg_prop = self._get_security_group_properties(context, security_group_id) return sg_prop.provider def _is_policy_security_group(self, context, security_group_id): sg_prop = self._get_security_group_properties(context, security_group_id) return True if sg_prop.policy else False def _get_security_group_policy(self, context, security_group_id): sg_prop = self._get_security_group_properties(context, security_group_id) return sg_prop.policy def _check_provider_security_group_exists(self, context, security_group_id): # NOTE(roeyc): We want to retrieve the 
security-group info by calling # get_security_group, this will also validate that the provider # security-group belongs to the same tenant this request is made for. sg = self.get_security_group(context, security_group_id) if not sg[provider_sg.PROVIDER]: raise provider_sg.SecurityGroupNotProvider(id=sg) def _check_invalid_security_groups_specified(self, context, port, only_warn=False): """Check if the lists of security groups are valid When only_warn is True we do not raise an exception here, because this may fail nova boot. Instead we will later remove provider security groups from the regular security groups list of the port. Since all the provider security groups of the tenant will be on this list anyway, the result will be the same. """ if validators.is_attr_set(port.get(ext_sg.SECURITYGROUPS)): for sg in port.get(ext_sg.SECURITYGROUPS, []): # makes sure user doesn't add non-provider secgrp as secgrp if self._is_provider_security_group(context, sg): if only_warn: LOG.warning( "Ignored provider security group %(sg)s in " "security groups list for port %(id)s", {'sg': sg, 'id': port['id']}) else: raise provider_sg.SecurityGroupIsProvider(id=sg) if validators.is_attr_set( port.get(provider_sg.PROVIDER_SECURITYGROUPS)): # also check all provider groups are provider. 
for sg in port.get(provider_sg.PROVIDER_SECURITYGROUPS, []): self._check_provider_security_group_exists(context, sg) def _get_tenant_provider_security_groups(self, context, tenant_id): res = context.session.query( NsxExtendedSecurityGroupProperties.security_group_id ).join(securitygroups_db.SecurityGroup).filter( securitygroups_db.SecurityGroup.tenant_id == tenant_id, NsxExtendedSecurityGroupProperties.provider == sa.true()).all() return [r[0] for r in res] def _validate_security_group_properties_create(self, context, security_group, default_sg): self._validate_provider_security_group_create(context, security_group, default_sg) def _validate_provider_security_group_create(self, context, security_group, default_sg): if not security_group.get(provider_sg.PROVIDER, False): return if default_sg: raise provider_sg.DefaultSecurityGroupIsNotProvider() def _get_provider_security_groups_on_port(self, context, port): p = port['port'] tenant_id = p['tenant_id'] provider_sgs = p.get(provider_sg.PROVIDER_SECURITYGROUPS, n_constants.ATTR_NOT_SPECIFIED) if p.get('device_owner') and n_utils.is_port_trusted(p): return if not validators.is_attr_set(provider_sgs): if provider_sgs is n_constants.ATTR_NOT_SPECIFIED: provider_sgs = self._get_tenant_provider_security_groups( context, tenant_id) else: # Accept None as indication that this port should not be # associated with any provider security-group. 
provider_sgs = [] return provider_sgs def _get_port_security_groups_lists(self, context, port): """Return 2 lists of this port security groups: 1) Regular security groups for this port 2) Provider security groups for this port """ port_data = port['port'] # First check that the configuration is valid self._check_invalid_security_groups_specified( context, port_data, only_warn=True) # get the 2 separate lists of security groups sgids = self._get_security_groups_on_port( context, port) or [] psgids = self._get_provider_security_groups_on_port( context, port) or [] had_sgs = len(sgids) > 0 # remove provider security groups which were specified also in the # regular sg list sgids = list(set(sgids) - set(psgids)) if not len(sgids) and had_sgs: # Add the default sg of the tenant if no other remained tenant_id = port_data.get('tenant_id') default_sg = self._ensure_default_security_group( context, tenant_id) sgids.append(default_sg) return (sgids, psgids) def _process_port_create_provider_security_group(self, context, p, security_group_ids): if validators.is_attr_set(security_group_ids): for security_group_id in security_group_ids: self._create_port_security_group_binding(context, p['id'], security_group_id) p[provider_sg.PROVIDER_SECURITYGROUPS] = security_group_ids or [] def _process_port_update_provider_security_group(self, context, port, original_port, updated_port): p = port['port'] provider_sg_specified = (provider_sg.PROVIDER_SECURITYGROUPS in p and p[provider_sg.PROVIDER_SECURITYGROUPS] != n_constants.ATTR_NOT_SPECIFIED) provider_sg_changed = ( provider_sg_specified and not helpers.compare_elements( original_port[provider_sg.PROVIDER_SECURITYGROUPS], p[provider_sg.PROVIDER_SECURITYGROUPS])) sg_changed = ( set(original_port[ext_sg.SECURITYGROUPS]) != set(updated_port[ext_sg.SECURITYGROUPS])) if sg_changed or provider_sg_changed: self._check_invalid_security_groups_specified(context, p) if provider_sg_changed: port['port']['tenant_id'] = original_port['id'] 
port['port']['id'] = original_port['id'] updated_port[provider_sg.PROVIDER_SECURITYGROUPS] = ( self._get_provider_security_groups_on_port(context, port)) else: updated_port[provider_sg.PROVIDER_SECURITYGROUPS] = ( original_port[provider_sg.PROVIDER_SECURITYGROUPS]) if provider_sg_changed or sg_changed: if not sg_changed: query = context.session.query( securitygroups_db.SecurityGroupPortBinding) for sg in original_port[provider_sg.PROVIDER_SECURITYGROUPS]: binding = query.filter_by( port_id=p['id'], security_group_id=sg).one() context.session.delete(binding) self._process_port_create_provider_security_group( context, updated_port, updated_port[provider_sg.PROVIDER_SECURITYGROUPS]) return provider_sg_changed def _prevent_non_admin_delete_provider_sg(self, context, sg_id): # Only someone who is an admin is allowed to delete this. if not context.is_admin and self._is_provider_security_group(context, sg_id): raise provider_sg.ProviderSecurityGroupDeleteNotAdmin(id=sg_id) def _prevent_non_admin_delete_policy_sg(self, context, sg_id): # Only someone who is an admin is allowed to delete this. if not context.is_admin and self._is_policy_security_group(context, sg_id): raise sg_policy.PolicySecurityGroupDeleteNotAdmin(id=sg_id) @staticmethod @resource_extend.extends([ext_sg.SECURITYGROUPS]) def _extend_security_group_with_properties(sg_res, sg_db): if sg_db.ext_properties: sg_res[sg_logging.LOGGING] = sg_db.ext_properties.logging sg_res[provider_sg.PROVIDER] = sg_db.ext_properties.provider sg_res[sg_policy.POLICY] = sg_db.ext_properties.policy @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_dict_provider_security_group(port_res, port_db): # Add the provider sg list to the port. 
# later we will remove those from the regular sg list provider_groups = [] for sec_group_mapping in port_db.security_groups: if (sec_group_mapping.extended_grp and sec_group_mapping.extended_grp.provider is True): provider_groups.append(sec_group_mapping['security_group_id']) port_res[provider_sg.PROVIDER_SECURITYGROUPS] = provider_groups return port_res @staticmethod def _remove_provider_security_groups_from_list(port_res): # Remove provider security groups from the list of regular security # groups of the result port if (ext_sg.SECURITYGROUPS not in port_res or provider_sg.PROVIDER_SECURITYGROUPS not in port_res): return port_res[ext_sg.SECURITYGROUPS] = list( set(port_res[ext_sg.SECURITYGROUPS]) - set(port_res[provider_sg.PROVIDER_SECURITYGROUPS])) vmware-nsx-12.0.1/vmware_nsx/db/__init__.py0000666000175100017510000000000013244523345020612 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/db/extended_security_group_rule.py0000666000175100017510000000704713244523345025067 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db import _resource_extend as resource_extend from neutron.db import api as db_api from neutron.db.models import securitygroup from neutron.extensions import securitygroup as ext_sg from neutron_lib.api import validators from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ from vmware_nsx.extensions import secgroup_rule_local_ip_prefix as ext_local_ip class NotIngressRule(nexception.BadRequest): message = _("Specifying local_ip_prefix is supported " "with ingress rules only.") class NsxExtendedSecurityGroupRuleProperties(model_base.BASEV2): """Persist security group rule properties for the extended-security-group-rule extension. """ __tablename__ = 'nsx_extended_security_group_rule_properties' rule_id = sa.Column(sa.String(36), sa.ForeignKey('securitygrouprules.id', ondelete='CASCADE'), primary_key=True, nullable=False) local_ip_prefix = sa.Column(sa.String(255), nullable=False) rule = orm.relationship( securitygroup.SecurityGroupRule, backref=orm.backref('ext_properties', lazy='joined', uselist=False, cascade='delete')) @resource_extend.has_resource_extenders class ExtendedSecurityGroupRuleMixin(object): def _check_local_ip_prefix(self, context, rule): rule_specify_local_ip_prefix = validators.is_attr_set( rule.get(ext_local_ip.LOCAL_IP_PREFIX)) if rule_specify_local_ip_prefix and rule['direction'] != 'ingress': raise NotIngressRule() if not rule_specify_local_ip_prefix: # remove ATTR_NOT_SPECIFIED rule[ext_local_ip.LOCAL_IP_PREFIX] = None return rule_specify_local_ip_prefix def _process_security_group_rule_properties(self, context, rule_res, rule_req): rule_res[ext_local_ip.LOCAL_IP_PREFIX] = None if not validators.is_attr_set( rule_req.get(ext_local_ip.LOCAL_IP_PREFIX)): return with db_api.context_manager.writer.using(context): properties = NsxExtendedSecurityGroupRuleProperties( rule_id=rule_res['id'], 
local_ip_prefix=rule_req[ext_local_ip.LOCAL_IP_PREFIX]) context.session.add(properties) rule_res[ext_local_ip.LOCAL_IP_PREFIX] = ( rule_req[ext_local_ip.LOCAL_IP_PREFIX]) @staticmethod @resource_extend.extends([ext_sg.SECURITYGROUPRULES]) def _extend_security_group_rule_with_params(sg_rule_res, sg_rule_db): if sg_rule_db.ext_properties: sg_rule_res[ext_local_ip.LOCAL_IP_PREFIX] = ( sg_rule_db.ext_properties.local_ip_prefix) else: sg_rule_res[ext_local_ip.LOCAL_IP_PREFIX] = None vmware-nsx-12.0.1/vmware_nsx/db/db.py0000666000175100017510000006371713244523413017464 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import six from sqlalchemy.orm import exc from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils import neutron.db.api as db from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import nsx_models LOG = logging.getLogger(__name__) def _apply_filters_to_query(query, model, filters, like_filters=None): if filters: for key, value in six.iteritems(filters): column = getattr(model, key, None) if column: query = query.filter(column.in_(value)) if like_filters: for key, search_term in six.iteritems(like_filters): column = getattr(model, key, None) if column: query = query.filter(column.like(search_term)) return query def get_network_bindings(session, network_id): session = session or db.get_reader_session() return (session.query(nsx_models.TzNetworkBinding). filter_by(network_id=network_id). all()) def get_network_bindings_by_vlanid_and_physical_net(session, vlan_id, phy_uuid): session = session or db.get_reader_session() return (session.query(nsx_models.TzNetworkBinding). filter_by(vlan_id=vlan_id, phy_uuid=phy_uuid). all()) def delete_network_bindings(session, network_id): return (session.query(nsx_models.TzNetworkBinding). filter_by(network_id=network_id).delete()) def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id): with session.begin(subtransactions=True): binding = nsx_models.TzNetworkBinding(network_id, binding_type, phy_uuid, vlan_id) session.add(binding) return binding def add_neutron_nsx_network_mapping(session, neutron_id, nsx_switch_id, dvs_id=None): with session.begin(subtransactions=True): mapping = nsx_models.NeutronNsxNetworkMapping( neutron_id=neutron_id, nsx_id=nsx_switch_id, dvs_id=dvs_id) session.add(mapping) return mapping def delete_neutron_nsx_network_mapping(session, neutron_id): return (session.query(nsx_models.NeutronNsxNetworkMapping). 
filter_by(neutron_id=neutron_id).delete()) def add_neutron_nsx_port_mapping(session, neutron_id, nsx_switch_id, nsx_port_id): session.begin(subtransactions=True) try: mapping = nsx_models.NeutronNsxPortMapping( neutron_id, nsx_switch_id, nsx_port_id) session.add(mapping) session.commit() except db_exc.DBDuplicateEntry: with excutils.save_and_reraise_exception() as ctxt: session.rollback() # do not complain if the same exact mapping is being added, # otherwise re-raise because even though it is possible for the # same neutron port to map to different back-end ports over time, # this should not occur whilst a mapping already exists current = get_nsx_switch_and_port_id(session, neutron_id) if current[1] == nsx_port_id: LOG.debug("Port mapping for %s already available", neutron_id) ctxt.reraise = False except db_exc.DBError: with excutils.save_and_reraise_exception(): # rollback for any other db error session.rollback() return mapping def add_neutron_nsx_router_mapping(session, neutron_id, nsx_router_id): with session.begin(subtransactions=True): mapping = nsx_models.NeutronNsxRouterMapping( neutron_id=neutron_id, nsx_id=nsx_router_id) session.add(mapping) return mapping def add_neutron_nsx_security_group_mapping(session, neutron_id, nsx_id): """Map a Neutron security group to a NSX security profile. 
:param session: a valid database session object :param neutron_id: a neutron security group identifier :param nsx_id: a nsx security profile identifier """ with session.begin(subtransactions=True): mapping = nsx_models.NeutronNsxSecurityGroupMapping( neutron_id=neutron_id, nsx_id=nsx_id) session.add(mapping) return mapping def get_nsx_service_binding(session, network_id, service_type): try: return session.query(nsx_models.NeutronNsxServiceBinding).filter_by( network_id=network_id, nsx_service_type=service_type).one() except exc.NoResultFound: LOG.debug("NSX %s service not enabled on network %s", service_type, network_id) def add_neutron_nsx_service_binding(session, network_id, port_id, service_type, service_id): """Store enabled NSX services on each Neutron network. :param session: database session object :param network_id: identifier of Neutron network enabling the service :param port_id: identifier of Neutron port providing the service :param service_type: type of NSX service :param service_id: identifier of NSX service """ with session.begin(subtransactions=True): binding = nsx_models.NeutronNsxServiceBinding( network_id=network_id, port_id=port_id, nsx_service_type=service_type, nsx_service_id=service_id) session.add(binding) return binding def delete_neutron_nsx_service_binding(session, network_id, service_type): return session.query(nsx_models.NeutronNsxServiceBinding).filter_by( network_id=network_id, nsx_service_type=service_type).delete() def update_nsx_dhcp_bindings(session, port_id, org_ip, new_ip): try: with session.begin(subtransactions=True): binding = (session.query(nsx_models.NeutronNsxDhcpBinding). 
filter_by(port_id=port_id, ip_address=org_ip).one()) binding.ip_address = new_ip except exc.NoResultFound: LOG.debug("Binding not found for port %s", port_id) return def get_nsx_dhcp_bindings(session, port_id): return [binding for binding in session.query( nsx_models.NeutronNsxDhcpBinding).filter_by(port_id=port_id)] def get_nsx_dhcp_bindings_by_service(session, service_id): return [binding for binding in session.query( nsx_models.NeutronNsxDhcpBinding).filter_by(nsx_service_id=service_id)] def add_neutron_nsx_dhcp_binding(session, port_id, subnet_id, ip_address, service_id, binding_id): """Store DHCP binding of each Neutron port. :param session: database session object :param port_id: identifier of Neutron port with DHCP binding :param subnet_id: identifier of Neutron subnet for the port :param ip_address: IP address for the port in this subnet. :param service_id: identifier of NSX DHCP service :param binding_id: identifier of NSX DHCP binding """ with session.begin(subtransactions=True): binding = nsx_models.NeutronNsxDhcpBinding( port_id=port_id, subnet_id=subnet_id, ip_address=ip_address, nsx_service_id=service_id, nsx_binding_id=binding_id) session.add(binding) return binding def delete_neutron_nsx_dhcp_binding(session, port_id, binding_id): return session.query(nsx_models.NeutronNsxDhcpBinding).filter_by( port_id=port_id, nsx_binding_id=binding_id).delete() def delete_neutron_nsx_dhcp_bindings_by_service_id(session, service_id): return session.query(nsx_models.NeutronNsxDhcpBinding).filter_by( nsx_service_id=service_id).delete() def get_nsx_switch_ids(session, neutron_id): # This function returns a list of NSX switch identifiers because of # the possibility of chained logical switches return [mapping['nsx_id'] for mapping in session.query(nsx_models.NeutronNsxNetworkMapping).filter_by( neutron_id=neutron_id)] def get_nsx_network_mappings(session, neutron_id): # This function returns a list of NSX switch identifiers because of # the possibility of chained 
logical switches return session.query(nsx_models.NeutronNsxNetworkMapping).filter_by( neutron_id=neutron_id).all() def get_nsx_switch_id_for_dvs(session, neutron_id, dvs_id): """Retrieve the NSX switch ID for a given DVS ID and neutron network.""" try: mapping = (session.query(nsx_models.NeutronNsxNetworkMapping). filter_by(neutron_id=neutron_id, dvs_id=dvs_id).one()) return mapping['nsx_id'] except exc.NoResultFound: LOG.debug("NSX switch for dvs-id: %s not yet stored in Neutron DB", dvs_id) def get_net_ids(session, nsx_id): return [mapping['neutron_id'] for mapping in get_nsx_network_mapping_for_nsx_id(session, nsx_id)] def get_nsx_network_mapping_for_nsx_id(session, nsx_id): return session.query(nsx_models.NeutronNsxNetworkMapping).filter_by( nsx_id=nsx_id).all() def get_nsx_networks_mapping(session): return session.query(nsx_models.NeutronNsxNetworkMapping).all() def get_nsx_switch_and_port_id(session, neutron_id): try: mapping = (session.query(nsx_models.NeutronNsxPortMapping). filter_by(neutron_id=neutron_id). one()) return mapping['nsx_switch_id'], mapping['nsx_port_id'] except exc.NoResultFound: LOG.debug("NSX identifiers for neutron port %s not yet " "stored in Neutron DB", neutron_id) return None, None def get_nsx_router_id(session, neutron_id): try: mapping = (session.query(nsx_models.NeutronNsxRouterMapping). filter_by(neutron_id=neutron_id).one()) return mapping['nsx_id'] except exc.NoResultFound: LOG.debug("NSX identifiers for neutron router %s not yet " "stored in Neutron DB", neutron_id) def get_neutron_from_nsx_router_id(session, nsx_router_id): try: mapping = (session.query(nsx_models.NeutronNsxRouterMapping). filter_by(nsx_id=nsx_router_id).one()) return mapping['neutron_id'] except exc.NoResultFound: LOG.debug("Couldn't find router with nsx id %s", nsx_router_id) def get_nsx_security_group_id(session, neutron_id, moref=False): """Return the id of a security group in the NSX backend. 
Note: security groups are called 'security profiles' in NSX """ try: mappings = (session.query(nsx_models.NeutronNsxSecurityGroupMapping). filter_by(neutron_id=neutron_id). all()) for mapping in mappings: if moref and not uuidutils.is_uuid_like(mapping['nsx_id']): return mapping['nsx_id'] if not moref and uuidutils.is_uuid_like(mapping['nsx_id']): return mapping['nsx_id'] except exc.NoResultFound: LOG.debug("NSX identifiers for neutron security group %s not yet " "stored in Neutron DB", neutron_id) return None def get_nsx_security_group_ids(session, neutron_ids): """Return list of ids of a security groups in the NSX backend. """ filters = {'neutron_id': neutron_ids} like_filters = None query = session.query(nsx_models.NeutronNsxSecurityGroupMapping) mappings = _apply_filters_to_query( query, nsx_models.NeutronNsxSecurityGroupMapping, filters, like_filters).all() return [mapping['nsx_id'] for mapping in mappings if mapping['nsx_id'] is not None] def _delete_by_neutron_id(session, model, neutron_id): return session.query(model).filter_by(neutron_id=neutron_id).delete() def delete_neutron_nsx_port_mapping(session, neutron_id): return _delete_by_neutron_id( session, nsx_models.NeutronNsxPortMapping, neutron_id) def delete_neutron_nsx_router_mapping(session, neutron_id): return _delete_by_neutron_id( session, nsx_models.NeutronNsxRouterMapping, neutron_id) def unset_default_network_gateways(session): with session.begin(subtransactions=True): session.query(nsx_models.NetworkGateway).update( {nsx_models.NetworkGateway.default: False}) def set_default_network_gateway(session, gw_id): with session.begin(subtransactions=True): gw = (session.query(nsx_models.NetworkGateway). 
filter_by(id=gw_id).one()) gw['default'] = True def set_multiprovider_network(session, network_id): with session.begin(subtransactions=True): multiprovider_network = nsx_models.MultiProviderNetworks( network_id) session.add(multiprovider_network) return multiprovider_network def is_multiprovider_network(session, network_id): with session.begin(subtransactions=True): return bool( session.query(nsx_models.MultiProviderNetworks).filter_by( network_id=network_id).first()) # NSXv3 L2 Gateway DB methods. def add_l2gw_connection_mapping(session, connection_id, bridge_endpoint_id, port_id): with session.begin(subtransactions=True): mapping = nsx_models.NsxL2GWConnectionMapping( connection_id=connection_id, port_id=port_id, bridge_endpoint_id=bridge_endpoint_id) session.add(mapping) return mapping def get_l2gw_connection_mapping(session, connection_id): try: return (session.query(nsx_models.NsxL2GWConnectionMapping). filter_by(connection_id=connection_id).one()) except exc.NoResultFound: raise nsx_exc.NsxL2GWConnectionMappingNotFound(conn=connection_id) # NSXv3 QoS policy id <-> switch Id mapping def add_qos_policy_profile_mapping(session, qos_policy_id, switch_profile_id): with session.begin(subtransactions=True): mapping = nsx_models.QosPolicySwitchProfile( qos_policy_id=qos_policy_id, switch_profile_id=switch_profile_id) session.add(mapping) return mapping def get_switch_profile_by_qos_policy(session, qos_policy_id): try: entry = (session.query(nsx_models.QosPolicySwitchProfile). filter_by(qos_policy_id=qos_policy_id).one()) return entry.switch_profile_id except exc.NoResultFound: raise nsx_exc.NsxQosPolicyMappingNotFound(policy=qos_policy_id) def delete_qos_policy_profile_mapping(session, qos_policy_id): return (session.query(nsx_models.QosPolicySwitchProfile). filter_by(qos_policy_id=qos_policy_id).delete()) # NSXv3 Port Mirror Sessions DB methods. 
def add_port_mirror_session_mapping(session, tf_id, pm_session_id): with session.begin(subtransactions=True): mapping = nsx_models.NsxPortMirrorSessionMapping( tap_flow_id=tf_id, port_mirror_session_id=pm_session_id) session.add(mapping) return mapping def get_port_mirror_session_mapping(session, tf_id): try: return (session.query(nsx_models.NsxPortMirrorSessionMapping). filter_by(tap_flow_id=tf_id).one()) except exc.NoResultFound: raise nsx_exc.NsxPortMirrorSessionMappingNotFound(tf=tf_id) def delete_port_mirror_session_mapping(session, tf_id): return (session.query(nsx_models.NsxPortMirrorSessionMapping). filter_by(tap_flow_id=tf_id).delete()) @db.context_manager.writer def save_sg_mappings(context, sg_id, nsgroup_id, section_id): context.session.add( nsx_models.NeutronNsxFirewallSectionMapping(neutron_id=sg_id, nsx_id=section_id)) context.session.add( nsx_models.NeutronNsxSecurityGroupMapping(neutron_id=sg_id, nsx_id=nsgroup_id)) def get_sg_mappings(session, sg_id, moref=False): nsgroup_mappings = session.query( nsx_models.NeutronNsxSecurityGroupMapping ).filter_by(neutron_id=sg_id).all() nsgroup_mapping = section_mapping = None for mapping in nsgroup_mappings: if moref and not uuidutils.is_uuid_like(mapping['nsx_id']): nsgroup_mapping = mapping['nsx_id'] break if not moref and uuidutils.is_uuid_like(mapping['nsx_id']): nsgroup_mapping = mapping['nsx_id'] break section_mappings = session.query( nsx_models.NeutronNsxFirewallSectionMapping ).filter_by(neutron_id=sg_id).all() for mapping in section_mappings: if moref and not uuidutils.is_uuid_like(mapping['nsx_id']): section_mapping = mapping['nsx_id'] break if not moref and uuidutils.is_uuid_like(mapping['nsx_id']): section_mapping = mapping['nsx_id'] break return nsgroup_mapping, section_mapping def get_sg_rule_mapping(session, rule_id): rule_mapping = session.query( nsx_models.NeutronNsxRuleMapping).filter_by( neutron_id=rule_id).one() return rule_mapping.nsx_id def save_sg_rule_mappings(session, rules): with 
session.begin(subtransactions=True): for neutron_id, nsx_id in rules: mapping = nsx_models.NeutronNsxRuleMapping( neutron_id=neutron_id, nsx_id=nsx_id) session.add(mapping) def add_nsx_ipam_subnet_pool(session, subnet_id, nsx_pool_id): with session.begin(subtransactions=True): binding = nsx_models.NsxSubnetIpam( subnet_id=subnet_id, nsx_pool_id=nsx_pool_id) session.add(binding) return binding def get_nsx_ipam_pool_for_subnet(session, subnet_id): try: entry = session.query( nsx_models.NsxSubnetIpam).filter_by( subnet_id=subnet_id).one() return entry.nsx_pool_id except exc.NoResultFound: return def del_nsx_ipam_subnet_pool(session, subnet_id, nsx_pool_id): return (session.query(nsx_models.NsxSubnetIpam). filter_by(subnet_id=subnet_id, nsx_pool_id=nsx_pool_id).delete()) def get_certificate(session, purpose): try: cert_entry = session.query( nsx_models.NsxCertificateRepository).filter_by( purpose=purpose).one() return cert_entry.certificate, cert_entry.private_key except exc.NoResultFound: return None, None def save_certificate(session, purpose, cert, pk): with session.begin(subtransactions=True): cert_entry = nsx_models.NsxCertificateRepository( purpose=purpose, certificate=cert, private_key=pk) session.add(cert_entry) def delete_certificate(session, purpose): return (session.query(nsx_models.NsxCertificateRepository). 
filter_by(purpose=purpose).delete()) def add_nsx_lbaas_loadbalancer_binding(session, loadbalancer_id, lb_service_id, lb_router_id, vip_address): with session.begin(subtransactions=True): binding = nsx_models.NsxLbaasLoadbalancer( loadbalancer_id=loadbalancer_id, lb_service_id=lb_service_id, lb_router_id=lb_router_id, vip_address=vip_address) session.add(binding) return binding def get_nsx_lbaas_loadbalancer_binding(session, loadbalancer_id): try: return session.query( nsx_models.NsxLbaasLoadbalancer).filter_by( loadbalancer_id=loadbalancer_id).one() except exc.NoResultFound: return def get_nsx_lbaas_loadbalancer_binding_by_service(session, lb_service_id): return session.query( nsx_models.NsxLbaasLoadbalancer).filter_by( lb_service_id=lb_service_id).all() def delete_nsx_lbaas_loadbalancer_binding(session, loadbalancer_id): return (session.query(nsx_models.NsxLbaasLoadbalancer). filter_by(loadbalancer_id=loadbalancer_id).delete()) def add_nsx_lbaas_listener_binding(session, loadbalancer_id, listener_id, app_profile_id, lb_vs_id): with session.begin(subtransactions=True): binding = nsx_models.NsxLbaasListener( loadbalancer_id=loadbalancer_id, listener_id=listener_id, app_profile_id=app_profile_id, lb_vs_id=lb_vs_id) session.add(binding) return binding def get_nsx_lbaas_listener_binding(session, loadbalancer_id, listener_id): try: return session.query( nsx_models.NsxLbaasListener).filter_by( loadbalancer_id=loadbalancer_id, listener_id=listener_id).one() except exc.NoResultFound: return def delete_nsx_lbaas_listener_binding(session, loadbalancer_id, listener_id): return (session.query(nsx_models.NsxLbaasListener). 
filter_by(loadbalancer_id=loadbalancer_id, listener_id=listener_id).delete()) def add_nsx_lbaas_pool_binding(session, loadbalancer_id, pool_id, lb_pool_id, lb_vs_id=None): with session.begin(subtransactions=True): binding = nsx_models.NsxLbaasPool(loadbalancer_id=loadbalancer_id, pool_id=pool_id, lb_pool_id=lb_pool_id, lb_vs_id=lb_vs_id) session.add(binding) return binding def get_nsx_lbaas_pool_binding(session, loadbalancer_id, pool_id): try: return session.query(nsx_models.NsxLbaasPool).filter_by( loadbalancer_id=loadbalancer_id, pool_id=pool_id).one() except exc.NoResultFound: return def update_nsx_lbaas_pool_binding(session, loadbalancer_id, pool_id, lb_vs_id): try: with session.begin(subtransactions=True): binding = (session.query(nsx_models.NsxLbaasPool). filter_by(loadbalancer_id=loadbalancer_id, pool_id=pool_id).one()) binding.lb_vs_id = lb_vs_id except exc.NoResultFound: LOG.debug("Binding not found for pool %s", pool_id) return def delete_nsx_lbaas_pool_binding(session, loadbalancer_id, pool_id): return (session.query(nsx_models.NsxLbaasPool). filter_by(loadbalancer_id=loadbalancer_id, pool_id=pool_id).delete()) def add_nsx_lbaas_monitor_binding(session, loadbalancer_id, pool_id, hm_id, lb_monitor_id, lb_pool_id): with session.begin(subtransactions=True): binding = nsx_models.NsxLbaasMonitor( loadbalancer_id=loadbalancer_id, pool_id=pool_id, hm_id=hm_id, lb_monitor_id=lb_monitor_id, lb_pool_id=lb_pool_id) session.add(binding) return binding def get_nsx_lbaas_monitor_binding(session, loadbalancer_id, pool_id, hm_id): try: return session.query(nsx_models.NsxLbaasMonitor).filter_by( loadbalancer_id=loadbalancer_id, pool_id=pool_id, hm_id=hm_id).one() except exc.NoResultFound: return def delete_nsx_lbaas_monitor_binding(session, loadbalancer_id, pool_id, hm_id): return (session.query(nsx_models.NsxLbaasMonitor). 
filter_by(loadbalancer_id=loadbalancer_id, pool_id=pool_id, hm_id=hm_id).delete()) def add_nsx_lbaas_l7policy_binding(session, l7policy_id, lb_rule_id, lb_vs_id): with session.begin(subtransactions=True): binding = nsx_models.NsxLbaasL7Policy( l7policy_id=l7policy_id, lb_rule_id=lb_rule_id, lb_vs_id=lb_vs_id) session.add(binding) return binding def get_nsx_lbaas_l7policy_binding(session, l7policy_id): try: return session.query(nsx_models.NsxLbaasL7Policy).filter_by( l7policy_id=l7policy_id).one() except exc.NoResultFound: return def delete_nsx_lbaas_l7policy_binding(session, l7policy_id): return (session.query(nsx_models.NsxLbaasL7Policy). filter_by(l7policy_id=l7policy_id).delete()) def add_project_plugin_mapping(session, project, plugin): with session.begin(subtransactions=True): binding = nsx_models.NsxProjectPluginMapping( project=project, plugin=plugin) session.add(binding) return binding def get_project_plugin_mapping(session, project): try: return session.query(nsx_models.NsxProjectPluginMapping).filter_by( project=project).one() except exc.NoResultFound: return def get_project_plugin_mappings(session): return session.query(nsx_models.NsxProjectPluginMapping).all() def get_project_plugin_mappings_by_plugin(session, plugin): return session.query(nsx_models.NsxProjectPluginMapping).filter_by( plugin=plugin).all() def add_nsx_vpn_connection_mapping(session, neutron_id, session_id, dpd_profile_id, ike_profile_id, ipsec_profile_id, peer_ep_id): with session.begin(subtransactions=True): mapping = nsx_models.NsxVpnConnectionMapping( neutron_id=neutron_id, session_id=session_id, dpd_profile_id=dpd_profile_id, ike_profile_id=ike_profile_id, ipsec_profile_id=ipsec_profile_id, peer_ep_id=peer_ep_id) session.add(mapping) return mapping def get_nsx_vpn_connection_mapping(session, neutron_id): try: return (session.query(nsx_models.NsxVpnConnectionMapping). 
filter_by(neutron_id=neutron_id).one()) except exc.NoResultFound: return def delete_nsx_vpn_connection_mapping(session, neutron_id): return (session.query(nsx_models.NsxVpnConnectionMapping). filter_by(neutron_id=neutron_id).delete()) vmware-nsx-12.0.1/vmware_nsx/db/maclearning.py0000666000175100017510000000573513244523345021357 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from sqlalchemy.orm import exc from neutron.db import _model_query as model_query from neutron.db import _resource_extend as resource_extend from neutron.db import _utils as db_utils from neutron.db import api as db_api from neutron_lib.api.definitions import port as port_def from oslo_log import log as logging from vmware_nsx.db import nsx_models from vmware_nsx.extensions import maclearning as mac LOG = logging.getLogger(__name__) @resource_extend.has_resource_extenders class MacLearningDbMixin(object): """Mixin class for mac learning.""" def _make_mac_learning_state_dict(self, port, fields=None): res = {'port_id': port['port_id'], mac.MAC_LEARNING: port[mac.MAC_LEARNING]} return db_utils.resource_fields(res, fields) @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_mac_learning_state(port_res, port_db): state = port_db.mac_learning_state if state and state.mac_learning_enabled: port_res[mac.MAC_LEARNING] = state.mac_learning_enabled def _update_mac_learning_state(self, context, port_id, 
enabled): try: query = model_query.query_with_hooks( context, nsx_models.MacLearningState) state = query.filter( nsx_models.MacLearningState.port_id == port_id).one() state.update({mac.MAC_LEARNING: enabled}) except exc.NoResultFound: self._create_mac_learning_state(context, {'id': port_id, mac.MAC_LEARNING: enabled}) def _create_mac_learning_state(self, context, port): with db_api.context_manager.writer.using(context): enabled = port[mac.MAC_LEARNING] state = nsx_models.MacLearningState( port_id=port['id'], mac_learning_enabled=enabled) context.session.add(state) return self._make_mac_learning_state_dict(state) def get_mac_learning_state(self, context, port_id): try: query = model_query.query_with_hooks( context, nsx_models.MacLearningState) state = query.filter( nsx_models.MacLearningState.port_id == port_id).one() return state.mac_learning_enabled except exc.NoResultFound: return None vmware-nsx-12.0.1/vmware_nsx/db/nsxv_models.py0000666000175100017510000003703413244523345021435 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import portbindings from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db.models import l3 as l3_db from neutron.db import models_v2 from oslo_db.sqlalchemy import models from vmware_nsx.common import nsxv_constants class NsxvRouterBinding(model_base.BASEV2, model_base.HasStatusDescription, models.TimestampMixin): """Represents the mapping between neutron router and vShield Edge.""" __tablename__ = 'nsxv_router_bindings' # no ForeignKey to routers.id because for now, a router can be removed # from routers when delete_router is executed, but the binding is only # removed after the Edge is deleted router_id = sa.Column(sa.String(36), primary_key=True) edge_id = sa.Column(sa.String(36), nullable=True) lswitch_id = sa.Column(sa.String(36), nullable=True) appliance_size = sa.Column(sa.Enum( nsxv_constants.COMPACT, nsxv_constants.LARGE, nsxv_constants.XLARGE, nsxv_constants.QUADLARGE, name='nsxv_router_bindings_appliance_size')) edge_type = sa.Column(sa.Enum(nsxv_constants.SERVICE_EDGE, nsxv_constants.VDR_EDGE, name='nsxv_router_bindings_edge_type')) availability_zone = sa.Column(sa.String(36), nullable=True) class NsxvEdgeVnicBinding(model_base.BASEV2, models.TimestampMixin): """Represents mapping between vShield Edge vnic and neutron netowrk.""" __tablename__ = 'nsxv_edge_vnic_bindings' edge_id = sa.Column(sa.String(36), primary_key=True) vnic_index = sa.Column(sa.Integer(), primary_key=True) tunnel_index = sa.Column(sa.Integer(), primary_key=True) network_id = sa.Column(sa.String(36), nullable=True) class NsxvEdgeDhcpStaticBinding(model_base.BASEV2, models.TimestampMixin): """Represents mapping between mac addr and bindingId.""" __tablename__ = 'nsxv_edge_dhcp_static_bindings' edge_id = sa.Column(sa.String(36), primary_key=True) mac_address = sa.Column(sa.String(32), primary_key=True) binding_id = sa.Column(sa.String(36), nullable=False) class NsxvInternalNetworks(model_base.BASEV2, 
models.TimestampMixin): """Represents internal networks between NSXV plugin elements.""" __tablename__ = 'nsxv_internal_networks' network_purpose = sa.Column( sa.Enum(nsxv_constants.INTER_EDGE_PURPOSE, name='nsxv_internal_networks_purpose'), primary_key=True) network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id", ondelete="CASCADE"), nullable=True) availability_zone = sa.Column(sa.String(36), primary_key=True) class NsxvInternalEdges(model_base.BASEV2, models.TimestampMixin): """Represents internal Edge appliances for NSXV plugin operations.""" __tablename__ = 'nsxv_internal_edges' ext_ip_address = sa.Column(sa.String(64), primary_key=True) router_id = sa.Column(sa.String(36), nullable=True) purpose = sa.Column( sa.Enum(nsxv_constants.INTER_EDGE_PURPOSE, name='nsxv_internal_edges_purpose')) class NsxvSecurityGroupSectionMapping(model_base.BASEV2, models.TimestampMixin): """Backend mappings for Neutron Rule Sections. This class maps a neutron security group identifier to the corresponding NSX layer 3 section. """ __tablename__ = 'nsxv_security_group_section_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('securitygroups.id', ondelete="CASCADE"), primary_key=True) ip_section_id = sa.Column(sa.String(100)) class NsxvRuleMapping(model_base.BASEV2, models.TimestampMixin): """Backend mappings for Neutron Rule Sections. This class maps a neutron security group identifier to the corresponding NSX layer 3 and layer 2 sections. 
""" __tablename__ = 'nsxv_rule_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('securitygrouprules.id', ondelete="CASCADE"), primary_key=True) nsx_rule_id = sa.Column(sa.String(36), primary_key=True) class NsxvPortVnicMapping(model_base.BASEV2, models.TimestampMixin): """Maps neutron port to NSXv VM Vnic Id.""" __tablename__ = 'nsxv_port_vnic_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) nsx_id = sa.Column(sa.String(42), primary_key=True) class NsxvRouterExtAttributes(model_base.BASEV2, models.TimestampMixin): """Router attributes managed by NSX plugin extensions.""" __tablename__ = 'nsxv_router_ext_attributes' router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', ondelete="CASCADE"), primary_key=True) distributed = sa.Column(sa.Boolean, default=False, nullable=False) router_type = sa.Column( sa.Enum('shared', 'exclusive', name='nsxv_router_type'), default='exclusive', nullable=False) service_router = sa.Column(sa.Boolean, default=False, nullable=False) # Add a relationship to the Router model in order to instruct # SQLAlchemy to eagerly load this association router = orm.relationship( l3_db.Router, backref=orm.backref("nsx_attributes", lazy='joined', uselist=False, cascade='delete')) class NsxvTzNetworkBinding(model_base.BASEV2, models.TimestampMixin): """Represents a binding of a virtual network with a transport zone. 
This model class associates a Neutron network with a transport zone; optionally a vlan ID might be used if the binding type is 'bridge' """ __tablename__ = 'nsxv_tz_network_bindings' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True) binding_type = sa.Column( sa.Enum('flat', 'vlan', 'portgroup', 'vxlan', name='nsxv_tz_network_bindings_binding_type'), nullable=False, primary_key=True) phy_uuid = sa.Column(sa.String(36), primary_key=True, nullable=True) vlan_id = sa.Column(sa.Integer, primary_key=True, nullable=True, autoincrement=False) def __init__(self, network_id, binding_type, phy_uuid, vlan_id): self.network_id = network_id self.binding_type = binding_type self.phy_uuid = phy_uuid self.vlan_id = vlan_id def __repr__(self): return "" % (self.network_id, self.binding_type, self.phy_uuid, self.vlan_id) class NsxvPortIndexMapping(model_base.BASEV2, models.TimestampMixin): """Associates attached Neutron ports with the instance VNic index.""" __tablename__ = 'nsxv_port_index_mappings' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) device_id = sa.Column(sa.String(255), nullable=False) index = sa.Column(sa.Integer, nullable=False) __table_args__ = (sa.UniqueConstraint(device_id, index), model_base.BASEV2.__table_args__) # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly read port vnic-index port = orm.relationship( models_v2.Port, backref=orm.backref("vnic_index", lazy='joined', uselist=False, cascade='delete')) class NsxvEdgeFirewallRuleBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between firewall rule and edge firewall rule_id.""" __tablename__ = 'nsxv_firewall_rule_bindings' rule_id = sa.Column(sa.String(36), primary_key=True) edge_id = sa.Column(sa.String(36), primary_key=True) rule_vse_id = sa.Column(sa.String(36)) class NsxvSpoofGuardPolicyNetworkMapping(model_base.BASEV2, models.TimestampMixin): """Mapping 
between SpoofGuard and neutron networks""" __tablename__ = 'nsxv_spoofguard_policy_network_mappings' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE'), primary_key=True, nullable=False) policy_id = sa.Column(sa.String(36), nullable=False) class NsxvLbaasLoadbalancerBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between Edge LB and LBaaSv2""" __tablename__ = 'nsxv_lbaas_loadbalancer_bindings' loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey('lbaas_loadbalancers.id', name='fk_lbaas_loadbalancers_id', ondelete="CASCADE"), primary_key=True) edge_id = sa.Column(sa.String(36), nullable=False) edge_fw_rule_id = sa.Column(sa.String(36), nullable=False) vip_address = sa.Column(sa.String(36), nullable=False) class NsxvLbaasListenerBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between Edge VSE and LBaaSv2""" __tablename__ = 'nsxv_lbaas_listener_bindings' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) listener_id = sa.Column(sa.String(36), sa.ForeignKey('lbaas_listeners.id', name='fk_lbaas_listeners_id', ondelete="CASCADE"), primary_key=True) app_profile_id = sa.Column(sa.String(36), nullable=False) vse_id = sa.Column(sa.String(36), nullable=False) class NsxvLbaasPoolBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between Edge Pool and LBaaSv2""" __tablename__ = 'nsxv_lbaas_pool_bindings' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) pool_id = sa.Column(sa.String(36), sa.ForeignKey('lbaas_pools.id', name='fk_lbaas_pools_id', ondelete="CASCADE"), primary_key=True) edge_pool_id = sa.Column(sa.String(36), nullable=False) class NsxvLbaasMonitorBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between Edge Monitor and LBaaSv2""" __tablename__ = 'nsxv_lbaas_monitor_bindings' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) pool_id = sa.Column(sa.String(36), primary_key=True) hm_id = sa.Column(sa.String(36), 
sa.ForeignKey('lbaas_healthmonitors.id', name='fk_lbaas_healthmonitors_id', ondelete="CASCADE"), primary_key=True) edge_id = sa.Column(sa.String(36), primary_key=True) edge_mon_id = sa.Column(sa.String(36), nullable=False) class NsxvLbaasCertificateBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between Edge certificate and LBaaSv2 object""" __tablename__ = 'nsxv_lbaas_certificate_bindings' cert_id = sa.Column(sa.String(128), primary_key=True) edge_id = sa.Column(sa.String(36), primary_key=True) edge_cert_id = sa.Column(sa.String(36), nullable=False) class NsxvLbaasL7PolicyBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between NSX Edge and LBaaSv2 L7 policy """ __tablename__ = 'nsxv_lbaas_l7policy_bindings' policy_id = sa.Column(sa.String(36), sa.ForeignKey('lbaas_l7policies.id', name='fk_lbaas_l7policies_id', ondelete="CASCADE"), primary_key=True) edge_id = sa.Column(sa.String(36), nullable=False) edge_app_rule_id = sa.Column(sa.String(36), nullable=False) class NsxvSubnetExtAttributes(model_base.BASEV2, models.TimestampMixin): """Subnet attributes managed by NSX plugin extensions.""" __tablename__ = 'nsxv_subnet_ext_attributes' subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"), primary_key=True) dns_search_domain = sa.Column(sa.String(255), nullable=True) dhcp_mtu = sa.Column(sa.Integer, nullable=True) # Add a relationship to the Subnet model in order to instruct # SQLAlchemy to eagerly load this association subnet = orm.relationship( models_v2.Subnet, backref=orm.backref("nsxv_subnet_attributes", lazy='joined', uselist=False, cascade='delete')) class NsxvPortExtAttributes(model_base.BASEV2, models.TimestampMixin): """Port attributes managed by NSX plugin extensions.""" __tablename__ = 'nsxv_port_ext_attributes' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) vnic_type = sa.Column(sa.String(64), nullable=False, default=portbindings.VNIC_NORMAL, 
server_default=portbindings.VNIC_NORMAL) # Add a relationship to the port model in order to instruct # SQLAlchemy to eagerly load this association port = orm.relationship( models_v2.Port, backref=orm.backref("nsx_port_attributes", lazy='joined', uselist=False, cascade='delete')) class NsxvBgpSpeakerBinding(model_base.BASEV2, models.TimestampMixin): # Maps bgp_speaker_id to NSXv edge id __tablename__ = 'nsxv_bgp_speaker_bindings' edge_id = sa.Column(sa.String(36), primary_key=True) bgp_speaker_id = sa.Column(sa.String(36), sa.ForeignKey('bgp_speakers.id', ondelete='CASCADE'), nullable=False) # A given BGP speaker sets the value of its BGP Identifier to an IP address # that is assigned to that BGP speaker. bgp_identifier = sa.Column(sa.String(64), nullable=False) class NsxvBgpPeerEdgeBinding(model_base.BASEV2, models.TimestampMixin): # Maps between bgp-peer and edges service gateway. __tablename__ = 'nsxv_bgp_peer_edge_bindings' peer_id = sa.Column(sa.String(36), sa.ForeignKey('bgp_peers.id', ondelete='CASCADE'), primary_key=True, nullable=False) edge_id = sa.Column(sa.String(36), nullable=False) vmware-nsx-12.0.1/vmware_nsx/db/nsx_portbindings_db.py0000666000175100017510000001554613244523345023137 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from oslo_serialization import jsonutils from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import portbindings as pbin from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api import validators from neutron_lib import constants from neutron_lib import exceptions from neutron_lib.plugins import directory from neutron.db import _resource_extend as resource_extend from neutron.db import api as db_api from neutron.db import portbindings_db as pbin_db from neutron.plugins.ml2 import models as pbin_model from vmware_nsx._i18n import _ from vmware_nsx.common import nsx_constants from vmware_nsx.common import utils as c_utils from vmware_nsx.db import nsxv_db LOG = logging.getLogger(__name__) FLAT_VLAN = 0 SUPPORTED_VNIC_TYPES = (pbin.VNIC_NORMAL, pbin.VNIC_DIRECT, pbin.VNIC_DIRECT_PHYSICAL) VNIC_TYPES_DIRECT_PASSTHROUGH = (pbin.VNIC_DIRECT, pbin.VNIC_DIRECT_PHYSICAL) @resource_extend.has_resource_extenders class NsxPortBindingMixin(pbin_db.PortBindingMixin): def _validate_port_vnic_type(self, context, port_data, network_id): vnic_type = port_data.get(pbin.VNIC_TYPE) if vnic_type and vnic_type not in SUPPORTED_VNIC_TYPES: err_msg = _("Invalid port vnic-type '%(vnic_type)s'." "Supported vnic-types are %(valid_types)s." 
) % {'vnic_type': vnic_type, 'valid_types': SUPPORTED_VNIC_TYPES} raise exceptions.InvalidInput(error_message=err_msg) direct_vnic_type = vnic_type in VNIC_TYPES_DIRECT_PASSTHROUGH if direct_vnic_type: self._validate_vnic_type_direct_passthrough_for_network( context, network_id) return direct_vnic_type def _validate_vnic_type_direct_passthrough_for_network(self, context, network_id): supported_network_types = (c_utils.NsxVNetworkTypes.VLAN, c_utils.NsxVNetworkTypes.FLAT, c_utils.NsxVNetworkTypes.PORTGROUP) if not self._validate_network_type(context, network_id, supported_network_types): msg_info = { 'vnic_types': VNIC_TYPES_DIRECT_PASSTHROUGH, 'networks': supported_network_types} err_msg = _("%(vnic_types)s port vnic-types are only supported " "for ports on networks of types " "%(networks)s.") % msg_info raise exceptions.InvalidInput(error_message=err_msg) def _process_portbindings_create_and_update(self, context, port, port_res): super(NsxPortBindingMixin, self)._process_portbindings_create_and_update( context, port, port_res) port_id = port_res['id'] org_vnic_type = nsxv_db.get_nsxv_ext_attr_port_vnic_type( context.session, port_id) vnic_type = port.get(pbin.VNIC_TYPE, org_vnic_type) cap_port_filter = (port.get(pbin.VNIC_TYPE, org_vnic_type) == pbin.VNIC_NORMAL) vif_details = {pbin.CAP_PORT_FILTER: cap_port_filter} network = self.get_network(context, port_res['network_id']) if network.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.FLAT: vif_details[pbin.VIF_DETAILS_VLAN] = FLAT_VLAN elif network.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.VLAN: vif_details[pbin.VIF_DETAILS_VLAN] = network[pnet.SEGMENTATION_ID] with db_api.context_manager.writer.using(context): port_binding = context.session.query( pbin_model.PortBinding).filter_by(port_id=port_id).first() if not port_binding: port_binding = pbin_model.PortBinding( port_id=port_id, vif_type=nsx_constants.VIF_TYPE_DVS) context.session.add(port_binding) port_binding.host = port_res[pbin.HOST_ID] or '' 
port_binding.vnic_type = vnic_type port_binding.vif_details = jsonutils.dumps(vif_details) nsxv_db.update_nsxv_port_ext_attributes( context.session, port_id, vnic_type) profile = port.get(pbin.PROFILE, constants.ATTR_NOT_SPECIFIED) if validators.is_attr_set(profile) or profile is None: port_binding.profile = (jsonutils.dumps(profile) if profile else "") port_res[pbin.VNIC_TYPE] = vnic_type self.extend_port_portbinding(port_res, port_binding) def extend_port_portbinding(self, port_res, binding): port_res[pbin.PROFILE] = self._get_profile(binding) port_res[pbin.VIF_TYPE] = binding.vif_type port_res[pbin.VIF_DETAILS] = self._get_vif_details(binding) def _get_vif_details(self, binding): if binding.vif_details: try: return jsonutils.loads(binding.vif_details) except Exception: LOG.error("Serialized vif_details DB value '%(value)s' " "for port %(port)s is invalid", {'value': binding.vif_details, 'port': binding.port_id}) return {} def _get_profile(self, binding): if binding.profile: try: return jsonutils.loads(binding.profile) except Exception: LOG.error("Serialized profile DB value '%(value)s' for " "port %(port)s is invalid", {'value': binding.profile, 'port': binding.port_id}) return {} @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_portbinding(port_res, port_db): plugin = directory.get_plugin() plugin.extend_port_dict_binding(port_res, port_db) if port_db.nsx_port_attributes: port_res[pbin.VNIC_TYPE] = port_db.nsx_port_attributes.vnic_type if port_db.port_binding: plugin.extend_port_portbinding(port_res, port_db.port_binding) vmware-nsx-12.0.1/vmware_nsx/db/vcns_models.py0000666000175100017510000000260613244523345021405 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from oslo_db.sqlalchemy import models class VcnsRouterBinding(model_base.BASEV2, model_base.HasStatusDescription, models.TimestampMixin): """Represents the mapping between neutron router and vShield Edge.""" __tablename__ = 'vcns_router_bindings' # no ForeignKey to routers.id because for now, a router can be removed # from routers when delete_router is executed, but the binding is only # removed after the Edge is deleted router_id = sa.Column(sa.String(36), primary_key=True) edge_id = sa.Column(sa.String(16), nullable=True) lswitch_id = sa.Column(sa.String(36), nullable=False) vmware-nsx-12.0.1/vmware_nsx/db/distributedrouter.py0000666000175100017510000000175713244523345022662 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib.api.definitions import dvr as dvr_apidef from vmware_nsx.db import nsxrouter class DistributedRouter_mixin(nsxrouter.NsxRouterMixin): """Mixin class to enable distributed router support.""" nsx_attributes = ( nsxrouter.NsxRouterMixin.nsx_attributes + [{ 'name': dvr_apidef.DISTRIBUTED, 'default': False }]) vmware-nsx-12.0.1/vmware_nsx/db/nsxrouter.py0000666000175100017510000000460213244523345021140 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from oslo_log import log as logging from neutron.db import _resource_extend as resource_extend from vmware_nsx.db import nsxv_models LOG = logging.getLogger(__name__) @resource_extend.has_resource_extenders class NsxRouterMixin(object): """Mixin class to enable nsx router support.""" nsx_attributes = [] @staticmethod def _extend_nsx_router_dict(router_res, router_db, nsx_attributes): nsx_attrs = router_db['nsx_attributes'] for attr in nsx_attributes: name = attr['name'] default = attr['default'] router_res[name] = ( nsx_attrs and nsx_attrs[name] or default) def _process_nsx_router_create( self, context, router_db, router_req): if not router_db['nsx_attributes']: kwargs = {} for attr in self.nsx_attributes: name = attr['name'] default = attr['default'] kwargs[name] = router_req.get(name, default) nsx_attributes = nsxv_models.NsxvRouterExtAttributes( router_id=router_db['id'], **kwargs) context.session.add(nsx_attributes) router_db['nsx_attributes'] = nsx_attributes else: # The situation where the record already exists will # be likely once the NSXRouterExtAttributes model # will allow for defining several attributes pertaining # to different extensions for attr in self.nsx_attributes: name = attr['name'] default = attr['default'] router_db['nsx_attributes'][name] = router_req.get( name, default) LOG.debug("Nsx router extension successfully processed " "for router:%s", router_db['id']) vmware-nsx-12.0.1/vmware_nsx/__init__.py0000666000175100017510000000117413244523345020242 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os NSX_EXT_PATH = os.path.join(os.path.dirname(__file__), 'extensions') vmware-nsx-12.0.1/vmware_nsx/common/0000775000175100017510000000000013244524600017407 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/common/utils.py0000666000175100017510000002231313244523345021131 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import inspect import re from distutils import version import functools import hashlib import xml.etree.ElementTree as et import eventlet import six import tenacity from tenacity import _utils as tenacity_utils from neutron import version as n_version from neutron_lib.api import validators from neutron_lib import constants from oslo_context import context as common_context from oslo_log import log from vmware_nsxlib.v3 import nsx_constants as v3_const LOG = log.getLogger(__name__) MAX_DISPLAY_NAME_LEN = 40 NEUTRON_VERSION = n_version.version_info.release_string() OS_NEUTRON_ID_SCOPE = 'os-neutron-id' # Allowed network types for the NSX Plugin class NetworkTypes(object): """Allowed provider network types for the NSX Plugin.""" L3_EXT = 'l3_ext' STT = 'stt' GRE = 'gre' FLAT = 'flat' VLAN = 'vlan' BRIDGE = 'bridge' PORTGROUP = 'portgroup' # Allowed network types for the NSX-v Plugin class NsxVNetworkTypes(object): """Allowed provider network types for the NSX-v Plugin.""" FLAT = 'flat' VLAN = 'vlan' VXLAN = 'vxlan' PORTGROUP = 'portgroup' # Allowed network types for the NSXv3 Plugin class NsxV3NetworkTypes(object): """Allowed provider network types for the NSXv3 Plugin.""" FLAT = 'flat' VLAN = 'vlan' GENEVE = 'geneve' NSX_NETWORK = 'nsx-net' def is_nsx_version_1_1_0(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion(v3_const.NSX_VERSION_1_1_0)) def is_nsx_version_2_0_0(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion(v3_const.NSX_VERSION_2_0_0)) def is_nsx_version_2_1_0(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion(v3_const.NSX_VERSION_2_1_0)) def is_nsxv_version_6_2(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion('6.2')) def is_nsxv_version_6_3(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion('6.3')) def is_nsxv_dhcp_binding_supported(nsx_version): return ((version.LooseVersion(nsx_version) >= 
version.LooseVersion('6.3.3')) or (version.LooseVersion(nsx_version) >= version.LooseVersion('6.2.8') and version.LooseVersion(nsx_version) < version.LooseVersion('6.3'))) def get_tags(**kwargs): tags = ([dict(tag=value, scope=key) for key, value in six.iteritems(kwargs)]) tags.append({"tag": NEUTRON_VERSION, "scope": "quantum"}) return sorted(tags, key=lambda x: x['tag']) def device_id_to_vm_id(device_id, obfuscate=False): # device_id can be longer than 40 characters, for example # a device_id for a dhcp port is like the following: # # dhcp83b5fdeb-e3b4-5e18-ac5f-55161...80747326-47d7-46c2-a87a-cf6d5194877c # # To fit it into an NSX tag we need to hash it, however device_id # used for ports associated to VM's are small enough so let's skip the # hashing if len(device_id) > MAX_DISPLAY_NAME_LEN or obfuscate: return hashlib.sha1(device_id.encode()).hexdigest() else: return device_id or "N/A" def check_and_truncate(display_name): if (validators.is_attr_set(display_name) and len(display_name) > MAX_DISPLAY_NAME_LEN): LOG.debug("Specified name:'%s' exceeds maximum length. 
" "It will be truncated on NSX", display_name) return display_name[:MAX_DISPLAY_NAME_LEN] return display_name or '' def normalize_xml(data): data = data.encode('ascii', 'ignore') return et.fromstring(data) def _get_bad_request_error_code(e): """Get the error code out of the exception""" try: desc = normalize_xml(e.response) return int(desc.find('errorCode').text) except Exception: pass def _log_before_retry(func, trial_number): """Before call strategy that logs to some logger the attempt.""" if trial_number > 1: LOG.warning("Retrying call to '%(func)s' for the %(num)s time", {'func': tenacity_utils.get_callback_name(func), 'num': tenacity_utils.to_ordinal(trial_number)}) def _get_args_from_frame(frames, frame_num): if len(frames) > frame_num and frames[frame_num] and frames[frame_num][0]: argvalues = inspect.getargvalues(frames[frame_num][0]) formated_args = inspect.formatargvalues(*argvalues) # remove the first 'self' arg from the log as it adds no information formated_args = re.sub(r'\(self=.*?, ', "(", formated_args) return formated_args def _log_after_retry(func, trial_number, trial_time_taken): """After call strategy that logs to some logger the finished attempt.""" # Using inspect to get arguments of the relevant call frames = inspect.trace() formated_args = _get_args_from_frame(frames, 1) if not formated_args: formated_args = "Unknown" LOG.warning("Finished retry of %(func)s for the %(num)s time after " "%(time)0.3f(s) with args: %(args)s", {'func': tenacity_utils.get_callback_name(func), 'num': tenacity_utils.to_ordinal(trial_number), 'time': trial_time_taken, 'args': formated_args}) def retry_upon_exception_exclude_error_codes( exc, excluded_errors, delay, max_delay, max_attempts): """Retry with the configured exponential delay, unless the exception error code is in the given list """ def retry_if_not_error_codes(e): # return True only for BadRequests without error codes or with error # codes not in the exclude list if isinstance(e, exc): error_code = 
_get_bad_request_error_code(e) if error_code and error_code not in excluded_errors: return True return False return tenacity.retry(reraise=True, retry=tenacity.retry_if_exception( retry_if_not_error_codes), wait=tenacity.wait_exponential( multiplier=delay, max=max_delay), stop=tenacity.stop_after_attempt(max_attempts), before=_log_before_retry, after=_log_after_retry) def retry_upon_exception(exc, delay, max_delay, max_attempts): return tenacity.retry(reraise=True, retry=tenacity.retry_if_exception_type(exc), wait=tenacity.wait_exponential( multiplier=delay, max=max_delay), stop=tenacity.stop_after_attempt(max_attempts), before=_log_before_retry, after=_log_after_retry) def read_file(path): try: with open(path) as file: return file.read().strip() except IOError as e: LOG.error("Error while opening file " "%(path)s: %(err)s", {'path': path, 'err': str(e)}) def get_name_and_uuid(name, uuid, tag=None, maxlen=80): short_uuid = '_' + uuid[:5] + '...' + uuid[-5:] maxlen = maxlen - len(short_uuid) if tag: maxlen = maxlen - len(tag) - 1 return name[:maxlen] + '_' + tag + short_uuid else: return name[:maxlen] + short_uuid def is_ipv4_ip_address(addr): def _valid_part(part): try: int_part = int(part) if int_part < 0 or int_part > 255: return False return True except ValueError: return False parts = str(addr).split('.') if len(parts) != 4: return False for ip_part in parts: if not _valid_part(ip_part): return False return True def is_port_dhcp_configurable(port): owner = port.get('device_owner') return (owner and not owner.startswith(constants.DEVICE_OWNER_NETWORK_PREFIX)) def spawn_n(func, *args, **kwargs): """Passthrough method for eventlet.spawn_n. This utility exists so that it can be stubbed for testing without interfering with the service spawns. It will also grab the context from the threadlocal store and add it to the store on the new thread. This allows for continuity in logging the context when using this method to spawn a new thread. 
""" _context = common_context.get_current() @functools.wraps(func) def context_wrapper(*args, **kwargs): # NOTE: If update_store is not called after spawn_n it won't be # available for the logger to pull from threadlocal storage. if _context is not None: _context.update_store() func(*args, **kwargs) eventlet.spawn_n(context_wrapper, *args, **kwargs) vmware-nsx-12.0.1/vmware_nsx/common/availability_zones.py0000666000175100017510000001476313244523345023673 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import availability_zone as az_exc from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc DEFAULT_NAME = 'default' class ConfiguredAvailabilityZone(object): def __init__(self, config_line, default_name=DEFAULT_NAME): self.name = "" self._is_default = False if config_line and ':' in config_line: # Older configuration - each line contains all the relevant # values for one availability zones, separated by ':' values = config_line.split(':') self.name = values[0] self._validate_zone_name(self.name) self.init_from_config_line(config_line) elif config_line: # Newer configuration - the name of the availability zone can be # used to get the rest of the configuration for this AZ self.name = config_line self._validate_zone_name(config_line) self.init_from_config_section(self.name) else: # Default zone configuration self.name = default_name self._is_default = True self.init_default_az() def is_default(self): return self._is_default def _validate_zone_name(self, config_line): if len(self.name) > 36: raise nsx_exc.NsxInvalidConfiguration( opt_name="availability_zones", opt_value=config_line, reason=_("Maximum name length is 36")) @abc.abstractmethod def init_from_config_line(self, config_values): pass @abc.abstractmethod def init_from_config_section(self, az_name): pass @abc.abstractmethod def init_default_az(self): pass class ConfiguredAvailabilityZones(object): default_name = DEFAULT_NAME def __init__(self, az_conf, az_class, default_availability_zones=None): self.availability_zones = {} # Add the configured availability zones for az in az_conf: obj = az_class(az) self.availability_zones[obj.name] = obj # add a default entry obj = az_class(None, default_name=self.default_name) self.availability_zones[obj.name] = obj # validate the default az: if default_availability_zones: # we support only 1 default 
az if len(default_availability_zones) > 1: raise nsx_exc.NsxInvalidConfiguration( opt_name="default_availability_zones", opt_value=default_availability_zones, reason=_("The NSX plugin supports only 1 default AZ")) default_az_name = default_availability_zones[0] if (default_az_name not in self.availability_zones): raise nsx_exc.NsxInvalidConfiguration( opt_name="default_availability_zones", opt_value=default_availability_zones, reason=_("The default AZ is not defined in the NSX " "plugin")) else: self._default_az = self.availability_zones[default_az_name] else: self._default_az = self.availability_zones[self.default_name] def get_availability_zone(self, name): """Return an availability zone object by its name """ if name in self.availability_zones.keys(): return self.availability_zones[name] return self.get_default_availability_zone() def get_default_availability_zone(self): """Return the default availability zone object """ return self._default_az def list_availability_zones(self): """Return a list of availability zones names """ return self.availability_zones.keys() def list_availability_zones_objects(self): """Return a list of availability zones objects """ return self.availability_zones.values() class NSXAvailabilityZonesPluginCommon(object): @abc.abstractmethod def init_availability_zones(self): # To be initialized by the real plugin self._availability_zones_data = None def get_azs_list(self): return self._availability_zones_data.list_availability_zones_objects() def get_azs_names(self): return self._availability_zones_data.list_availability_zones() def validate_obj_azs(self, availability_zones): """Verify that the availability zones exist, and only 1 hint was set. 
""" # For now we support only 1 hint per network/router # TODO(asarfaty): support multiple hints if len(availability_zones) > 1: err_msg = _("Can't use multiple availability zone hints") raise n_exc.InvalidInput(error_message=err_msg) # check that all hints appear in the predefined list of availability # zones diff = (set(availability_zones) - set(self.get_azs_names())) if diff: raise az_exc.AvailabilityZoneNotFound( availability_zone=diff.pop()) def get_az_by_hint(self, hint): az = self._availability_zones_data.get_availability_zone(hint) if not az: raise az_def.AvailabilityZoneNotFound(availability_zone=hint) return az def get_default_az(self): return self._availability_zones_data.get_default_availability_zone() def get_obj_az_by_hints(self, obj): if az_def.AZ_HINTS in obj: for hint in obj[az_def.AZ_HINTS]: # For now we use only the first hint return self.get_az_by_hint(hint) # return the default return self.get_default_az() def get_network_az(self, network): return self.get_obj_az_by_hints(network) def get_router_az(self, router): return self.get_obj_az_by_hints(router) vmware-nsx-12.0.1/vmware_nsx/common/config.py0000666000175100017510000014213013244523345021236 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_config import types from oslo_log import log as logging from neutron.conf.db import l3_hamode_db from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.dvs import dvs_utils from vmware_nsx.extensions import projectpluginmap from vmware_nsx.extensions import routersize LOG = logging.getLogger(__name__) DEFAULT_VDR_TRANSIT_NETWORK = "169.254.2.0/28" DEFAULT_PLR_ADDRESS = "169.254.2.3" class AgentModes(object): AGENT = 'agent' AGENTLESS = 'agentless' COMBINED = 'combined' class MetadataModes(object): DIRECT = 'access_network' INDIRECT = 'dhcp_host_route' class ReplicationModes(object): SERVICE = 'service' SOURCE = 'source' base_opts = [ cfg.IntOpt('max_lp_per_bridged_ls', default=5000, deprecated_group='NVP', help=_("Maximum number of ports of a logical switch on a " "bridged transport zone. The recommended value for " "this parameter varies with NSX version.\nPlease use:\n" "NSX 2.x -> 64\nNSX 3.0, 3.1 -> 5000\n" "NSX 3.2 -> 10000")), cfg.IntOpt('max_lp_per_overlay_ls', default=256, deprecated_group='NVP', help=_("Maximum number of ports of a logical switch on an " "overlay transport zone")), cfg.IntOpt('concurrent_connections', default=10, deprecated_group='NVP', help=_("Maximum concurrent connections to each NSX " "controller.")), cfg.IntOpt('nsx_gen_timeout', default=-1, deprecated_name='nvp_gen_timeout', deprecated_group='NVP', help=_("Number of seconds a generation id should be valid for " "(default -1 meaning do not time out)")), cfg.StrOpt('metadata_mode', default=MetadataModes.DIRECT, deprecated_group='NVP', help=_("If set to access_network this enables a dedicated " "connection to the metadata proxy for metadata server " "access via Neutron router. If set to dhcp_host_route " "this enables host route injection via the dhcp agent. 
" "This option is only useful if running on a host that " "does not support namespaces otherwise access_network " "should be used.")), cfg.StrOpt('default_transport_type', default='stt', deprecated_group='NVP', help=_("The default network tranport type to use (stt, gre, " "bridge, ipsec_gre, or ipsec_stt)")), cfg.StrOpt('agent_mode', default=AgentModes.AGENT, deprecated_group='NVP', help=_("Specifies in which mode the plugin needs to operate " "in order to provide DHCP and metadata proxy services " "to tenant instances. If 'agent' is chosen (default) " "the NSX plugin relies on external RPC agents (i.e. " "dhcp and metadata agents) to provide such services. " "In this mode, the plugin supports API extensions " "'agent' and 'dhcp_agent_scheduler'. If 'agentless' " "is chosen (experimental in Icehouse), the plugin will " "use NSX logical services for DHCP and metadata proxy. " "This simplifies the deployment model for Neutron, in " "that the plugin no longer requires the RPC agents to " "operate. When 'agentless' is chosen, the config option " "metadata_mode becomes ineffective. The 'agentless' " "mode works only on NSX 4.1. Furthermore, a 'combined' " "mode is also provided and is used to support existing " "deployments that want to adopt the agentless mode. " "With this mode, existing networks keep being served by " "the existing infrastructure (thus preserving backward " "compatibility, whereas new networks will be served by " "the new infrastructure. Migration tools are provided " "to 'move' one network from one model to another; with " "agent_mode set to 'combined', option " "'network_auto_schedule' in neutron.conf is ignored, as " "new networks will no longer be scheduled to existing " "dhcp agents.")), cfg.StrOpt('replication_mode', default=ReplicationModes.SERVICE, choices=(ReplicationModes.SERVICE, ReplicationModes.SOURCE), help=_("Specifies which mode packet replication should be done " "in. 
If set to service a service node is required in " "order to perform packet replication. This can also be " "set to source if one wants replication to be performed " "locally (NOTE: usually only useful for testing if one " "does not want to deploy a service node). In order to " "leverage distributed routers, replication_mode should " "be set to 'service'.")), cfg.FloatOpt('qos_peak_bw_multiplier', default=2.0, min=1.0, help=_("The QoS rules peak bandwidth value will be the " "configured maximum bandwidth of the QoS rule, " "multiplied by this value. Value must be bigger than" " 1")), ] sync_opts = [ cfg.IntOpt('state_sync_interval', default=10, deprecated_group='NVP_SYNC', help=_("Interval in seconds between runs of the status " "synchronization task. The plugin will aim at " "resynchronizing operational status for all resources " "in this interval, and it should be therefore large " "enough to ensure the task is feasible. Otherwise the " "plugin will be constantly synchronizing resource " "status, ie: a new task is started as soon as the " "previous is completed. If this value is set to 0, the " "state synchronization thread for this Neutron instance " "will be disabled.")), cfg.IntOpt('max_random_sync_delay', default=0, deprecated_group='NVP_SYNC', help=_("Random additional delay between two runs of the state " "synchronization task. An additional wait time between " "0 and max_random_sync_delay seconds will be added on " "top of state_sync_interval.")), cfg.IntOpt('min_sync_req_delay', default=1, deprecated_group='NVP_SYNC', help=_("Minimum delay, in seconds, between two status " "synchronization requests for NSX. Depending on chunk " "size, controller load, and other factors, state " "synchronization requests might be pretty heavy. This " "means the controller might take time to respond, and " "its load might be quite increased by them. This " "parameter allows to specify a minimum interval between " "two subsequent requests. 
The value for this parameter " "must never exceed state_sync_interval. If this does, " "an error will be raised at startup.")), cfg.IntOpt('min_chunk_size', default=500, deprecated_group='NVP_SYNC', help=_("Minimum number of resources to be retrieved from NSX " "in a single status synchronization request. The actual " "size of the chunk will increase if the number of " "resources is such that using the minimum chunk size " "will cause the interval between two requests to be " "less than min_sync_req_delay")), cfg.BoolOpt('always_read_status', default=False, deprecated_group='NVP_SYNC', help=_("Enable this option to allow punctual state " "synchronization on show operations. In this way, show " "operations will always fetch the operational status " "of the resource from the NSX backend, and this might " "have a considerable impact on overall performance.")) ] connection_opts = [ cfg.StrOpt('nsx_user', default='admin', deprecated_name='nvp_user', help=_('User name for NSX controllers in this cluster')), cfg.StrOpt('nsx_password', default='admin', deprecated_name='nvp_password', secret=True, help=_('Password for NSX controllers in this cluster')), cfg.IntOpt('http_timeout', default=75, help=_('Time before aborting a request on an ' 'unresponsive controller (Seconds)')), cfg.IntOpt('retries', default=2, help=_('Maximum number of times a particular request ' 'should be retried')), cfg.IntOpt('redirects', default=2, help=_('Maximum number of times a redirect response ' 'should be followed')), cfg.ListOpt('nsx_controllers', default=[], deprecated_name='nvp_controllers', help=_('Comma-separated list of NSX controller ' 'endpoints (:). When port is omitted, ' '443 is assumed. This option MUST be specified. 
' 'e.g.: aa.bb.cc.dd, ee.ff.gg.hh.ee:80')), cfg.IntOpt('conn_idle_timeout', default=900, help=_('Reconnect connection to nsx if not used within this ' 'amount of time.')), ] cluster_opts = [ cfg.StrOpt('default_tz_uuid', help=_("This is uuid of the default NSX Transport zone that " "will be used for creating tunneled isolated " "\"Neutron\" networks. It needs to be created in NSX " "before starting Neutron with the nsx plugin.")), cfg.StrOpt('default_l3_gw_service_uuid', help=_("(Optional) UUID of the NSX L3 Gateway " "service which will be used for implementing routers " "and floating IPs")), cfg.StrOpt('default_l2_gw_service_uuid', help=_("(Optional) UUID of the NSX L2 Gateway service " "which will be used by default for network gateways")), cfg.StrOpt('default_service_cluster_uuid', help=_("(Optional) UUID of the Service Cluster which will " "be used by logical services like dhcp and metadata")), cfg.StrOpt('nsx_default_interface_name', default='breth0', deprecated_name='default_interface_name', help=_("Name of the interface on a L2 Gateway transport node " "which should be used by default when setting up a " "network connection")), ] nsx_common_opts = [ cfg.StrOpt('nsx_l2gw_driver', help=_("Specify the class path for the Layer 2 gateway " "backend driver(i.e. NSXv3/NSX-V). This field will be " "used when a L2 Gateway service plugin is configured.")), cfg.StrOpt('locking_coordinator_url', help=_("(Optional) URL for distributed locking coordination " "resource for lock manager. This value is passed as a " "parameter to tooz coordinator. By default, value is " "None and oslo_concurrency is used for single-node " "lock management.")), cfg.BoolOpt('api_replay_mode', default=False, help=_("If true, the server then allows the caller to " "specify the id of resources. 
This should only " "be enabled in order to allow one to migrate an " "existing install of neutron to the nsx-v3 plugin.")), cfg.ListOpt('nsx_extension_drivers', default=[], help=_("An ordered list of extension driver " "entrypoints to be loaded from the " "vmware_nsx.extension_drivers namespace.")), ] nsx_v3_opts = [ cfg.ListOpt('nsx_api_user', default=['admin'], help=_('User names for the NSX managers')), cfg.ListOpt('nsx_api_password', default=['default'], secret=True, help=_('Passwords for the NSX managers')), cfg.ListOpt('nsx_api_managers', default=[], help=_("IP address of one or more NSX managers separated " "by commas. The IP address should be of the form:\n" "[://][:]\nIf scheme is not " "provided https is used. If port is not provided port " "80 is used for http and port 443 for https.")), cfg.BoolOpt('nsx_use_client_auth', default=False, help=_("Use client certificate in NSX manager " "authentication")), cfg.StrOpt('nsx_client_cert_file', default='', help=_("File to contain client certificate and private key")), cfg.StrOpt('nsx_client_cert_pk_password', default="", secret=True, help=_("password for private key encryption")), cfg.StrOpt('nsx_client_cert_storage', default='nsx-db', choices=['nsx-db', 'none'], help=_("Storage type for client certificate sensitive data")), cfg.StrOpt('default_overlay_tz', help=_("This is the name or UUID of the default NSX overlay " "transport zone that will be used for creating " "tunneled isolated Neutron networks. It needs to be " "created in NSX before starting Neutron with the NSX " "plugin.")), cfg.StrOpt('default_vlan_tz', help=_("(Optional) Only required when creating VLAN or flat " "provider networks. 
Name or UUID of default NSX VLAN " "transport zone that will be used for bridging between " "Neutron networks, if no physical network has been " "specified")), cfg.StrOpt('default_bridge_cluster', help=_("(Optional) Name or UUID of the default NSX bridge " "cluster that will be used to perform L2 gateway " "bridging between VXLAN and VLAN networks. If default " "bridge cluster UUID is not specified, admin will have " "to manually create a L2 gateway corresponding to a " "NSX Bridge Cluster using L2 gateway APIs. This field " "must be specified on one of the active neutron " "servers only.")), cfg.IntOpt('retries', default=10, help=_('Maximum number of times to retry API requests upon ' 'stale revision errors.')), cfg.ListOpt('ca_file', help=_('Specify a CA bundle files to use in verifying the NSX ' 'Managers server certificate. This option is ignored ' 'if "insecure" is set to True. If "insecure" is set to ' 'False and ca_file is unset, the system root CAs will ' 'be used to verify the server certificate.')), cfg.BoolOpt('insecure', default=True, help=_('If true, the NSX Manager server certificate is not ' 'verified. 
If false the CA bundle specified via ' '"ca_file" will be used or if unsest the default ' 'system root CAs will be used.')), cfg.IntOpt('http_timeout', default=10, help=_('The time in seconds before aborting a HTTP connection ' 'to a NSX manager.')), cfg.IntOpt('http_read_timeout', default=180, help=_('The time in seconds before aborting a HTTP read ' 'response from a NSX manager.')), cfg.IntOpt('http_retries', default=3, help=_('Maximum number of times to retry a HTTP connection.')), cfg.IntOpt('concurrent_connections', default=10, help=_("Maximum concurrent connections to each NSX " "manager.")), cfg.IntOpt('conn_idle_timeout', default=10, help=_("The amount of time in seconds to wait before ensuring " "connectivity to the NSX manager if no manager " "connection has been used.")), cfg.IntOpt('redirects', default=2, help=_('Number of times a HTTP redirect should be followed.')), cfg.StrOpt('default_tier0_router', help=_("Name or UUID of the default tier0 router that will be " "used for connecting to tier1 logical routers and " "configuring external networks")), cfg.IntOpt('number_of_nested_groups', default=8, help=_("(Optional) The number of nested groups which are used " "by the plugin, each Neutron security-groups is added " "to one nested group, and each nested group can contain " "as maximum as 500 security-groups, therefore, the " "maximum number of security groups that can be created " "is 500 * number_of_nested_groups. The default is 8 " "nested groups, which allows a maximum of 4k " "security-groups, to allow creation of more " "security-groups, modify this figure.")), cfg.StrOpt('metadata_mode', default=MetadataModes.DIRECT, help=_("If set to access_network this enables a dedicated " "connection to the metadata proxy for metadata server " "access via Neutron router. If set to dhcp_host_route " "this enables host route injection via the dhcp agent. 
" "This option is only useful if running on a host that " "does not support namespaces otherwise access_network " "should be used.")), cfg.BoolOpt('metadata_on_demand', default=False, help=_("If true, an internal metadata network will be created " "for a router only when the router is attached to a " "DHCP-disabled subnet.")), cfg.BoolOpt('native_dhcp_metadata', default=True, help=_("If true, DHCP and metadata proxy services will be " "provided by NSX backend.")), cfg.StrOpt('native_metadata_route', default="169.254.169.254/31", help=_("The metadata route used for native metadata proxy " "service.")), cfg.StrOpt('dhcp_profile', help=_("This is the name or UUID of the NSX DHCP Profile " "that will be used to enable native DHCP service. It " "needs to be created in NSX before starting Neutron " "with the NSX plugin")), cfg.IntOpt('dhcp_lease_time', default=86400, help=_("DHCP default lease time.")), cfg.StrOpt('dns_domain', default='openstacklocal', help=_("Domain to use for building the hostnames.")), cfg.ListOpt('nameservers', default=[], help=_("List of nameservers to configure for the DHCP " "binding entries. These will be used if there are no " "nameservers defined on the subnet.")), cfg.StrOpt('metadata_proxy', help=_("This is the name or UUID of the NSX Metadata Proxy " "that will be used to enable native metadata service. 
" "It needs to be created in NSX before starting Neutron " "with the NSX plugin.")), cfg.StrOpt('dhcp_relay_service', help=_("(Optional) This is the name or UUID of the NSX dhcp " "relay service that will be used to enable DHCP relay " "on router ports.")), cfg.BoolOpt('log_security_groups_blocked_traffic', default=False, help=_("(Optional) Indicates whether distributed-firewall " "rule for security-groups blocked traffic is logged.")), cfg.BoolOpt('log_security_groups_allowed_traffic', default=False, help=_("(Optional) Indicates whether distributed-firewall " "security-groups rules are logged.")), cfg.ListOpt('availability_zones', default=[], help=_('Optional parameter defining the networks availability ' 'zones names for the native dhcp configuration. The ' 'configuration of each zone will be under a group ' 'names [az:]')), cfg.BoolOpt('init_objects_by_tags', default=False, help=_("When True, the configured transport zones, router and " "profiles will be found by tags on the NSX. The scope " "of the tag will be the value of search_objects_" "scope. 
The value of the search tag will be the name " "configured in each respective configuration.")), cfg.StrOpt('search_objects_scope', help=_("This is the scope of the tag that will be used for " "finding the objects uuids on the NSX during plugin " "init.")), cfg.ListOpt('switching_profiles', default=[], help=_("Optional parameter defining a list switching profiles " "uuids that will be attached to all neutron created " "nsx ports.")), cfg.BoolOpt('ens_support', default=False, help=_("(Optional) Indicates whether ENS transport zones can " "be used")), ] DEFAULT_STATUS_CHECK_INTERVAL = 2000 DEFAULT_MINIMUM_POOLED_EDGES = 1 DEFAULT_MAXIMUM_POOLED_EDGES = 3 DEFAULT_MAXIMUM_TUNNELS_PER_VNIC = 20 nsxv_opts = [ cfg.StrOpt('user', default='admin', help=_('User name for NSXv manager')), cfg.StrOpt('password', default='default', secret=True, help=_('Password for NSXv manager')), cfg.StrOpt('manager_uri', help=_('URL for NSXv manager')), cfg.StrOpt('ca_file', help=_('Specify a CA bundle file to use in verifying the NSXv ' 'server certificate.')), cfg.BoolOpt('insecure', default=True, help=_('If true, the NSXv server certificate is not verified. ' 'If false, then the default CA truststore is used for ' 'verification. This option is ignored if "ca_file" is ' 'set.')), cfg.ListOpt('cluster_moid', default=[], help=_('(Required) Parameter listing the IDs of the clusters ' 'which are used by OpenStack.')), cfg.StrOpt('datacenter_moid', help=_('Required parameter identifying the ID of datacenter ' 'to deploy NSX Edges')), cfg.StrOpt('deployment_container_id', help=_('Optional parameter identifying the ID of datastore to ' 'deploy NSX Edges')), cfg.StrOpt('resource_pool_id', help=_('Optional parameter identifying the ID of resource to ' 'deploy NSX Edges')), cfg.ListOpt('availability_zones', default=[], help=_('Optional parameter defining the availability zones ' 'names for deploying NSX Edges. 
The configuration of ' 'each zone will be under a group names [az:]')), cfg.StrOpt('datastore_id', help=_('Optional parameter identifying the ID of datastore to ' 'deploy NSX Edges')), cfg.StrOpt('ha_datastore_id', help=_('Optional parameter identifying the ID of datastore to ' 'deploy NSX Edges in addition to data_store_id in case' 'edge_ha is True')), cfg.BoolOpt('ha_placement_random', default=False, help=_('When True and in case edge_ha is True, half of the ' 'edges will be placed in the primary datastore as ' 'active and the other half will be placed in the ' 'ha_datastore')), cfg.ListOpt('edge_host_groups', default=[], help=_('(Optional) If edge HA is used then this will ensure ' 'that active/backup edges are placed in the listed ' 'host groups. At least 2 predefined host groups need ' 'to be configured.')), cfg.StrOpt('external_network', help=_('(Required) Network ID for physical network ' 'connectivity')), cfg.IntOpt('task_status_check_interval', default=DEFAULT_STATUS_CHECK_INTERVAL, help=_("(Optional) Asynchronous task status check interval. " "Default is 2000 (millisecond)")), cfg.StrOpt('vdn_scope_id', help=_('(Optional) Network scope ID for VXLAN virtual wires')), cfg.StrOpt('dvs_id', help=_('(Optional) DVS MoRef ID for DVS connected to ' 'Management / Edge cluster')), cfg.IntOpt('maximum_tunnels_per_vnic', default=DEFAULT_MAXIMUM_TUNNELS_PER_VNIC, min=1, max=110, help=_('(Optional) Maximum number of sub interfaces supported ' 'per vnic in edge.')), cfg.ListOpt('backup_edge_pool', default=['service:compact:4:10', 'vdr:compact:4:10'], help=_("Defines edge pool's management range with the format: " ":[edge_size]::." "edge_type: service,vdr. " "edge_size: compact, large, xlarge, quadlarge " "and default is compact. 
By default, edge pool manager " "would manage service edge with compact size " "and distributed edge with compact size as following: " "service:compact:4:10,vdr:compact:" "4:10")), cfg.IntOpt('retries', default=20, help=_('Maximum number of API retries on endpoint.')), cfg.StrOpt('mgt_net_moid', help=_('(Optional) Portgroup MoRef ID for metadata proxy ' 'management network')), cfg.ListOpt('mgt_net_proxy_ips', default=[], help=_('(Optional) Comma separated list of management network ' 'IP addresses for metadata proxy.')), cfg.StrOpt('mgt_net_proxy_netmask', help=_("(Optional) Management network netmask for metadata " "proxy.")), cfg.StrOpt('mgt_net_default_gateway', help=_("(Optional) Management network default gateway for " "metadata proxy.")), cfg.ListOpt('nova_metadata_ips', default=[], help=_("(Optional) IP addresses used by Nova metadata " "service.")), cfg.PortOpt('nova_metadata_port', default=8775, help=_("(Optional) TCP Port used by Nova metadata server.")), cfg.StrOpt('metadata_shared_secret', secret=True, help=_("(Optional) Shared secret to sign metadata requests.")), cfg.BoolOpt('metadata_insecure', default=True, help=_("(Optional) If True, the end to end connection for " "metadata service is not verified. If False, the " "default CA truststore is used for verification.")), cfg.StrOpt('metadata_nova_client_cert', help=_('(Optional) Client certificate to use when metadata ' 'connection is to be verified. 
If not provided, ' 'a self signed certificate will be used.')), cfg.StrOpt('metadata_nova_client_priv_key', help=_("(Optional) Private key of client certificate.")), cfg.BoolOpt('spoofguard_enabled', default=True, help=_("(Optional) If True then plugin will use NSXV " "spoofguard component for port-security feature.")), cfg.BoolOpt('use_exclude_list', default=True, help=_("(Optional) If True then plugin will use NSXV exclude " "list component when port security is disabled and " "spoofguard is enabled.")), cfg.ListOpt('tenant_router_types', default=['shared', 'distributed', 'exclusive'], help=_("Ordered list of router_types to allocate as tenant " "routers. It limits the router types that the Nsxv " "can support for tenants:\ndistributed: router is " "supported by distributed edge at the backend.\n" "shared: multiple routers share the same service " "edge at the backend.\nexclusive: router exclusively " "occupies one service edge at the backend.\nNsxv would " "select the first available router type from " "tenant_router_types list if router-type is not " "specified. If the tenant defines the router type with " "'--distributed','--router_type exclusive' or " "'--router_type shared', Nsxv would verify that the " "router type is in tenant_router_types. 
Admin supports " "all these three router types.")), cfg.StrOpt('edge_appliance_user', secret=True, help=_("(Optional) Username to configure for Edge appliance " "login.")), cfg.StrOpt('edge_appliance_password', secret=True, help=_("(Optional) Password to configure for Edge appliance " "login.")), cfg.IntOpt('dhcp_lease_time', default=86400, help=_("(Optional) DHCP default lease time.")), cfg.BoolOpt('metadata_initializer', default=True, help=_("If True, the server instance will attempt to " "initialize the metadata infrastructure")), cfg.ListOpt('metadata_service_allowed_ports', item_type=types.Port(), default=[], help=_('List of tcp ports, to be allowed access to the ' 'metadata proxy, in addition to the default ' '80,443,8775 tcp ports')), cfg.BoolOpt('edge_ha', default=False, help=_("(Optional) Enable HA for NSX Edges.")), cfg.StrOpt('exclusive_router_appliance_size', default="compact", choices=routersize.VALID_EDGE_SIZES, help=_("(Optional) Edge appliance size to be used for creating " "exclusive router. Valid values: " "['compact', 'large', 'xlarge', 'quadlarge']. This " "exclusive_router_appliance_size will be picked up if " "--router-size parameter is not specified while doing " "neutron router-create")), cfg.StrOpt('shared_router_appliance_size', default="compact", choices=routersize.VALID_EDGE_SIZES, help=_("(Optional) Edge appliance size to be used for creating " "shared router edge. Valid values: " "['compact', 'large', 'xlarge', 'quadlarge'].")), cfg.StrOpt('dns_search_domain', help=_("(Optional) Use this search domain if there is no " "search domain configured on the subnet.")), cfg.ListOpt('nameservers', default=[], help=_('List of nameservers to configure for the DHCP binding ' 'entries. These will be used if there are no ' 'nameservers defined on the subnet.')), cfg.BoolOpt('use_dvs_features', default=False, help=_('If True, dvs features will be supported which ' 'involves configuring the dvs backing nsx_v directly. 
' 'If False, only features exposed via nsx_v will be ' 'supported')), cfg.BoolOpt('log_security_groups_blocked_traffic', default=False, help=_("(Optional) Indicates whether distributed-firewall " "rule for security-groups blocked traffic is logged.")), cfg.BoolOpt('log_security_groups_allowed_traffic', default=False, help=_("(Optional) Indicates whether distributed-firewall " "security-groups allowed traffic is logged.")), cfg.StrOpt('service_insertion_profile_id', help=_("(Optional) The profile id of the redirect firewall " "rules that will be used for the Service Insertion " "feature.")), cfg.BoolOpt('service_insertion_redirect_all', default=False, help=_("(Optional) If set to True, the plugin will create " "a redirect rule to send all the traffic to the " "security partner")), cfg.BoolOpt('use_nsx_policies', default=False, help=_("If set to True, the plugin will use NSX policies " "in the neutron security groups.")), cfg.StrOpt('default_policy_id', help=_("(Optional) If use_nsx_policies is True, this policy " "will be used as the default policy for new tenants.")), cfg.BoolOpt('allow_tenant_rules_with_policy', default=False, help=_("(Optional) If use_nsx_policies is True, this value " "will determine if a tenants can add rules to their " "security groups.")), cfg.StrOpt('vdr_transit_network', default=DEFAULT_VDR_TRANSIT_NETWORK, help=_("(Optional) Sets the network address for distributed " "router TLR-PLR connectivity, with " "/ syntax")), cfg.BoolOpt('bind_floatingip_to_all_interfaces', default=False, help=_("If set to False, router will associate floating ip " "with external interface of only, thus denying " "connectivity between hosts on same network via " "their floating ips. 
If True, floating ip will " "be associated with all router interfaces.")), cfg.BoolOpt('exclusive_dhcp_edge', default=False, help=_("(Optional) Have exclusive DHCP edge per network.")), cfg.IntOpt('bgp_neighbour_hold_down_timer', default=4, help=_("(Optional) Set the interval (Seconds) for BGP " "neighbour hold down time.")), cfg.IntOpt('bgp_neighbour_keep_alive_timer', default=1, help=_("(Optional) Set the interval (Seconds) for BGP " "neighbour keep alive time.")), cfg.IntOpt('ecmp_wait_time', default=2, help=_("(Optional) Set the wait time (Seconds) between " "enablement of ECMP.")), cfg.ListOpt('network_vlan_ranges', default=[], help=_("List of :: " "specifying DVS MoRef ID usable for VLAN provider " "networks, as well as ranges of VLAN tags on each " "available for allocation to networks.")), cfg.IntOpt('nsx_transaction_timeout', default=240, help=_("Timeout interval for NSX backend transactions.")), cfg.BoolOpt('share_edges_between_tenants', default=True, help=_("If False, different tenants will not use the same " "DHCP edge or router edge.")), cfg.ListOpt('housekeeping_jobs', default=['error_dhcp_edge', 'error_backup_edge'], help=_("List of the enabled housekeeping jobs")), cfg.ListOpt('housekeeping_readonly_jobs', default=[], help=_("List of housekeeping jobs which are enabled in read " "only mode")), cfg.BoolOpt('housekeeping_readonly', default=True, help=_("Housekeeping will only warn about breakage.")), cfg.BoolOpt('use_default_block_all', default=False, help=_("Use default block all rule when no security groups " "are set on a port and port security is enabled")), ] # define the configuration of each NSX-V availability zone. # the list of expected zones is under nsxv group: availability_zones # Note: if any of the optional arguments is missing - the global one will be # used instead. 
nsxv_az_opts = [ cfg.StrOpt('resource_pool_id', help=_('Identifying the ID of resource to deploy NSX Edges')), cfg.StrOpt('datastore_id', help=_('Identifying the ID of datastore to deploy NSX Edges')), cfg.BoolOpt('edge_ha', default=False, help=_("(Optional) Enable HA for NSX Edges.")), cfg.StrOpt('ha_datastore_id', help=_('Optional parameter identifying the ID of datastore to ' 'deploy NSX Edges in addition to data_store_id in case' 'edge_ha is True')), cfg.BoolOpt('ha_placement_random', help=_('When True and in case edge_ha is True, half of the ' 'edges will be placed in the primary datastore as ' 'active and the other half will be placed in the ' 'ha_datastore. If this value is not set, the global ' 'one will be used')), cfg.ListOpt('edge_host_groups', default=[], help=_('(Optional) If edge HA is used then this will ensure ' 'that active/backup edges are placed in the listed ' 'host groups. At least 2 predefined host groups need ' 'to be configured.')), cfg.StrOpt('datacenter_moid', help=_('(Optional) Identifying the ID of datacenter to deploy ' 'NSX Edges')), cfg.ListOpt('backup_edge_pool', help=_("(Optional) Defines edge pool's management range for " "the availability zone. 
If not defined, the global one " "will be used")), cfg.StrOpt('mgt_net_moid', help=_('(Optional) Portgroup MoRef ID for metadata proxy ' 'management network')), cfg.ListOpt('mgt_net_proxy_ips', default=[], help=_('(Optional) Comma separated list of management network ' 'IP addresses for metadata proxy.')), cfg.StrOpt('mgt_net_proxy_netmask', help=_("(Optional) Management network netmask for metadata " "proxy.")), cfg.StrOpt('mgt_net_default_gateway', help=_("(Optional) Management network default gateway for " "metadata proxy.")), cfg.StrOpt('external_network', help=_('(Optional) Network ID for physical network ' 'connectivity')), cfg.StrOpt('vdn_scope_id', help=_('(Optional) Network scope ID for VXLAN virtual wires')), cfg.StrOpt('dvs_id', help=_('(Optional) DVS MoRef ID for DVS connected to ' 'Management / Edge cluster')), cfg.BoolOpt('exclusive_dhcp_edge', default=False, help=_("(Optional) Have exclusive DHCP edge per network.")), cfg.BoolOpt('bind_floatingip_to_all_interfaces', default=False, help=_("If set to False, router will associate floating ip " "with external interface of only, thus denying " "connectivity between hosts on same network via " "their floating ips. If True, floating ip will " "be associated with all router interfaces.")), ] # define the configuration of each NSX-V3 availability zone. # the list of expected zones is under nsx_v3 group: availability_zones # Note: if any of the optional arguments is missing - the global one will be # used instead. nsxv3_az_opts = [ cfg.StrOpt('metadata_proxy', help=_("The name or UUID of the NSX Metadata Proxy " "that will be used to enable native metadata service. " "It needs to be created in NSX before starting Neutron " "with the NSX plugin.")), cfg.StrOpt('dhcp_profile', help=_("The name or UUID of the NSX DHCP Profile " "that will be used to enable native DHCP service. 
It " "needs to be created in NSX before starting Neutron " "with the NSX plugin")), cfg.StrOpt('native_metadata_route', help=_("(Optional) The metadata route used for native metadata " "proxy service.")), cfg.StrOpt('dns_domain', help=_("(Optional) Domain to use for building the hostnames.")), cfg.ListOpt('nameservers', help=_("(Optional) List of nameservers to configure for the " "DHCP binding entries. These will be used if there are " "no nameservers defined on the subnet.")), cfg.StrOpt('default_overlay_tz', help=_("(Optional) This is the name or UUID of the default NSX " "overlay transport zone that will be used for creating " "tunneled isolated Neutron networks. It needs to be " "created in NSX before starting Neutron with the NSX " "plugin.")), cfg.StrOpt('default_vlan_tz', help=_("(Optional) Only required when creating VLAN or flat " "provider networks. Name or UUID of default NSX VLAN " "transport zone that will be used for bridging between " "Neutron networks, if no physical network has been " "specified")), cfg.ListOpt('switching_profiles', help=_("(Optional) list switching profiles uuids that will be " "attached to all neutron created nsx ports.")), cfg.StrOpt('dhcp_relay_service', help=_("(Optional) This is the name or UUID of the NSX dhcp " "relay service that will be used to enable DHCP relay " "on router ports.")), ] nsx_tvd_opts = [ cfg.ListOpt('nsx_v_extension_drivers', default=[], help=_("An ordered list of NSX-V extension driver " "entrypoints to be loaded from the " "vmware_nsx.extension_drivers namespace.")), cfg.ListOpt('nsx_v3_extension_drivers', default=[], help=_("An ordered list of NSX-T extension driver " "entrypoints to be loaded from the " "vmware_nsx.extension_drivers namespace.")), cfg.ListOpt('dvs_extension_drivers', default=[], help=_("An ordered list of DVS extension driver " "entrypoints to be loaded from the " "vmware_nsx.extension_drivers namespace.")), cfg.StrOpt('default_plugin', default=projectpluginmap.NsxPlugins.NSX_T, 
choices=projectpluginmap.VALID_TYPES, help=_("The default plugin that will be used for new projects " "that were not added to the projects plugin mapping.")), cfg.ListOpt('nsx_v_default_availability_zones', default=[], help=_("The default availability zones that will be used for " "NSX-V networks and routers creation under the TVD " "plugin.")), cfg.ListOpt('nsx_v3_default_availability_zones', default=[], help=_("The default availability zones that will be used for " "NSX-V3 networks and routers creation under the TVD " "plugin.")), ] # Register the configuration options cfg.CONF.register_opts(connection_opts) cfg.CONF.register_opts(cluster_opts) cfg.CONF.register_opts(nsx_common_opts) cfg.CONF.register_opts(nsx_v3_opts, group="nsx_v3") cfg.CONF.register_opts(nsxv_opts, group="nsxv") cfg.CONF.register_opts(nsx_tvd_opts, group="nsx_tvd") cfg.CONF.register_opts(base_opts, group="NSX") cfg.CONF.register_opts(sync_opts, group="NSX_SYNC") # register l3_ha config opts. This is due to commit # a7c633dc8e8a67e65e558ecbdf9ea8efc5468251 cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS) def _register_nsx_azs(conf, availability_zones, az_opts): # first verify that the availability zones are in the format of a # list of names. 
The old format was a list of values for each az, # separated with ':' if not availability_zones or len(availability_zones[0].split(':')) > 1: return for az in availability_zones: az_group = 'az:%s' % az conf.register_group(cfg.OptGroup( name=az_group, title="Configuration for availability zone %s" % az)) conf.register_opts(az_opts, group=az_group) # register a group for each nsxv/v3 availability zones def register_nsxv_azs(conf, availability_zones): _register_nsx_azs(conf, availability_zones, nsxv_az_opts) def register_nsxv3_azs(conf, availability_zones): _register_nsx_azs(conf, availability_zones, nsxv3_az_opts) register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones) register_nsxv3_azs(cfg.CONF, cfg.CONF.nsx_v3.availability_zones) def _get_nsx_az_opts(az, opts): az_info = dict() group = 'az:%s' % az if group not in cfg.CONF: raise nsx_exc.NsxInvalidConfiguration( opt_name=group, opt_value='None', reason=(_("Configuration group \'%s\' must be defined") % group)) for opt in opts: az_info[opt.name] = cfg.CONF[group][opt.name] return az_info def get_nsxv_az_opts(az): return _get_nsx_az_opts(az, nsxv_az_opts) def get_nsxv3_az_opts(az): return _get_nsx_az_opts(az, nsxv3_az_opts) def validate_nsxv_config_options(): if (cfg.CONF.nsxv.manager_uri is None or cfg.CONF.nsxv.user is None or cfg.CONF.nsxv.password is None): error = _("manager_uri, user, and password must be configured!") raise nsx_exc.NsxPluginException(err_msg=error) if cfg.CONF.nsxv.dvs_id is None: LOG.warning("dvs_id must be configured to support VLANs!") if cfg.CONF.nsxv.vdn_scope_id is None: LOG.warning("vdn_scope_id must be configured to support VXLANs!") if cfg.CONF.nsxv.use_dvs_features and not dvs_utils.dvs_is_enabled( dvs_id=cfg.CONF.nsxv.dvs_id): error = _("dvs host/vcenter credentials must be defined to use " "dvs features") raise nsx_exc.NsxPluginException(err_msg=error) def validate_nsx_config_options(): if cfg.CONF.nsx_extension_drivers: error = _("nsx_extension_drivers should not be 
configured!") raise nsx_exc.NsxPluginException(err_msg=error) vmware-nsx-12.0.1/vmware_nsx/common/sync.py0000666000175100017510000007456413244523345020764 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import random from neutron_lib import constants from neutron_lib import context as n_context from neutron_lib import exceptions from neutron_lib.exceptions import l3 as l3_exc from oslo_log import log from oslo_serialization import jsonutils from oslo_service import loopingcall from oslo_utils import timeutils import six from neutron.db import _model_query as model_query from neutron.db import api as db_api from neutron.db.models import external_net as external_net_db from neutron.db.models import l3 as l3_db from neutron.db import models_v2 from vmware_nsx._i18n import _ from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import nsx_utils from vmware_nsx.nsxlib import mh as nsxlib from vmware_nsx.nsxlib.mh import router as routerlib from vmware_nsx.nsxlib.mh import switch as switchlib # Maximum page size for a single request # NOTE(salv-orlando): This might become a version-dependent map should the # limit be raised in future versions MAX_PAGE_SIZE = 5000 LOG = log.getLogger(__name__) class NsxCache(object): """A simple Cache for NSX resources. 
Associates resource id with resource hash to rapidly identify updated resources. Each entry in the cache also stores the following information: - changed: the resource in the cache has been altered following an update or a delete - hit: the resource has been visited during an update (and possibly left unchanged) - data: current resource data - data_bk: backup of resource data prior to its removal """ def __init__(self): # Maps an uuid to the dict containing it self._uuid_dict_mappings = {} # Dicts for NSX cached resources self._lswitches = {} self._lswitchports = {} self._lrouters = {} def __getitem__(self, key): # uuids are unique across the various types of resources # TODO(salv-orlando): Avoid lookups over all dictionaries # when retrieving items # Fetch lswitches, lports, or lrouters resources = self._uuid_dict_mappings[key] return resources[key] def _clear_changed_flag_and_remove_from_cache(self, resources): # Clear the 'changed' attribute for all items # NOTE(arosen): the copy.copy is to avoid: 'RuntimeError: # dictionary changed size during iteration' for py3 for uuid, item in copy.copy(resources).items(): if item.pop('changed', None) and not item.get('data'): # The item is not anymore in NSX, so delete it del resources[uuid] del self._uuid_dict_mappings[uuid] LOG.debug("Removed item %s from NSX object cache", uuid) def _update_resources(self, resources, new_resources, clear_changed=True): if clear_changed: self._clear_changed_flag_and_remove_from_cache(resources) def do_hash(item): return hash(jsonutils.dumps(item)) # Parse new data and identify new, deleted, and updated resources for item in new_resources: item_id = item['uuid'] if resources.get(item_id): new_hash = do_hash(item) if new_hash != resources[item_id]['hash']: resources[item_id]['hash'] = new_hash resources[item_id]['changed'] = True resources[item_id]['data_bk'] = ( resources[item_id]['data']) resources[item_id]['data'] = item # Mark the item as hit in any case resources[item_id]['hit'] = True 
LOG.debug("Updating item %s in NSX object cache", item_id) else: resources[item_id] = {'hash': do_hash(item)} resources[item_id]['hit'] = True resources[item_id]['changed'] = True resources[item_id]['data'] = item # add an uuid to dict mapping for easy retrieval # with __getitem__ self._uuid_dict_mappings[item_id] = resources LOG.debug("Added item %s to NSX object cache", item_id) def _delete_resources(self, resources): # Mark for removal all the elements which have not been visited. # And clear the 'hit' attribute. for to_delete in [k for (k, v) in six.iteritems(resources) if not v.pop('hit', False)]: resources[to_delete]['changed'] = True resources[to_delete]['data_bk'] = ( resources[to_delete].pop('data', None)) def _get_resource_ids(self, resources, changed_only): if changed_only: return [k for (k, v) in six.iteritems(resources) if v.get('changed')] return resources.keys() def get_lswitches(self, changed_only=False): return self._get_resource_ids(self._lswitches, changed_only) def get_lrouters(self, changed_only=False): return self._get_resource_ids(self._lrouters, changed_only) def get_lswitchports(self, changed_only=False): return self._get_resource_ids(self._lswitchports, changed_only) def update_lswitch(self, lswitch): self._update_resources(self._lswitches, [lswitch], clear_changed=False) def update_lrouter(self, lrouter): self._update_resources(self._lrouters, [lrouter], clear_changed=False) def update_lswitchport(self, lswitchport): self._update_resources(self._lswitchports, [lswitchport], clear_changed=False) def process_updates(self, lswitches=None, lrouters=None, lswitchports=None): self._update_resources(self._lswitches, lswitches) self._update_resources(self._lrouters, lrouters) self._update_resources(self._lswitchports, lswitchports) return (self._get_resource_ids(self._lswitches, changed_only=True), self._get_resource_ids(self._lrouters, changed_only=True), self._get_resource_ids(self._lswitchports, changed_only=True)) def process_deletes(self): 
self._delete_resources(self._lswitches) self._delete_resources(self._lrouters) self._delete_resources(self._lswitchports) return (self._get_resource_ids(self._lswitches, changed_only=True), self._get_resource_ids(self._lrouters, changed_only=True), self._get_resource_ids(self._lswitchports, changed_only=True)) class SyncParameters(object): """Defines attributes used by the synchronization procedure. chunk_size: Actual chunk size extra_chunk_size: Additional data to fetch because of chunk size adjustment current_chunk: Counter of the current data chunk being synchronized Page cursors: markers for the next resource to fetch. 'start' means page cursor unset for fetching 1st page init_sync_performed: True if the initial synchronization concluded """ def __init__(self, min_chunk_size): self.chunk_size = min_chunk_size self.extra_chunk_size = 0 self.current_chunk = 0 self.ls_cursor = 'start' self.lr_cursor = 'start' self.lp_cursor = 'start' self.init_sync_performed = False self.total_size = 0 def _start_loopingcall(min_chunk_size, state_sync_interval, func, initial_delay=5): """Start a loopingcall for the synchronization task.""" # Start a looping call to synchronize operational status # for neutron resources if not state_sync_interval: # do not start the looping call if specified # sync interval is 0 return state_synchronizer = loopingcall.DynamicLoopingCall( func, sp=SyncParameters(min_chunk_size)) state_synchronizer.start( initial_delay=initial_delay, periodic_interval_max=state_sync_interval) return state_synchronizer class NsxSynchronizer(object): LS_URI = nsxlib._build_uri_path( switchlib.LSWITCH_RESOURCE, fields='uuid,tags,fabric_status', relations='LogicalSwitchStatus') LR_URI = nsxlib._build_uri_path( routerlib.LROUTER_RESOURCE, fields='uuid,tags,fabric_status', relations='LogicalRouterStatus') LP_URI = nsxlib._build_uri_path( switchlib.LSWITCHPORT_RESOURCE, parent_resource_id='*', fields='uuid,tags,fabric_status_up', relations='LogicalPortStatus') def 
__init__(self, plugin, cluster, state_sync_interval, req_delay, min_chunk_size, max_rand_delay=0, initial_delay=5): random.seed() self._nsx_cache = NsxCache() # Store parameters as instance members # NOTE(salv-orlando): apologies if it looks java-ish self._plugin = plugin self._cluster = cluster self._req_delay = req_delay self._sync_interval = state_sync_interval self._max_rand_delay = max_rand_delay # Validate parameters if self._sync_interval < self._req_delay: err_msg = (_("Minimum request delay:%(req_delay)s must not " "exceed synchronization interval:%(sync_interval)s") % {'req_delay': self._req_delay, 'sync_interval': self._sync_interval}) LOG.error(err_msg) raise nsx_exc.NsxPluginException(err_msg=err_msg) # Backoff time in case of failures while fetching sync data self._sync_backoff = 1 # Store the looping call in an instance variable to allow unit tests # for controlling its lifecycle self._sync_looping_call = _start_loopingcall( min_chunk_size, state_sync_interval, self._synchronize_state, initial_delay=initial_delay) def _get_tag_dict(self, tags): return dict((tag.get('scope'), tag['tag']) for tag in tags) def synchronize_network(self, context, neutron_network_data, lswitches=None): """Synchronize a Neutron network with its NSX counterpart. This routine synchronizes a set of switches when a Neutron network is mapped to multiple lswitches. 
""" if not lswitches: # Try to get logical switches from nsx try: lswitches = nsx_utils.fetch_nsx_switches( context.session, self._cluster, neutron_network_data['id']) except exceptions.NetworkNotFound: # TODO(salv-orlando): We should be catching # api_exc.ResourceNotFound here # The logical switch was not found LOG.warning("Logical switch for neutron network %s not " "found on NSX.", neutron_network_data['id']) lswitches = [] else: for lswitch in lswitches: self._nsx_cache.update_lswitch(lswitch) # By default assume things go wrong status = constants.NET_STATUS_ERROR # In most cases lswitches will contain a single element for ls in lswitches: if not ls: # Logical switch was deleted break ls_status = ls['_relations']['LogicalSwitchStatus'] if not ls_status['fabric_status']: status = constants.NET_STATUS_DOWN break else: # No switch was down or missing. Set status to ACTIVE unless # there were no switches in the first place! if lswitches: status = constants.NET_STATUS_ACTIVE # Update db object if status == neutron_network_data['status']: # do nothing return with db_api.context_manager.writer.using(context): try: network = self._plugin._get_network(context, neutron_network_data['id']) except exceptions.NetworkNotFound: pass else: network.status = status LOG.debug("Updating status for neutron resource %(q_id)s to:" " %(status)s", {'q_id': neutron_network_data['id'], 'status': status}) def _synchronize_lswitches(self, ctx, ls_uuids, scan_missing=False): if not ls_uuids and not scan_missing: return neutron_net_ids = set() neutron_nsx_mappings = {} # TODO(salvatore-orlando): Deal with the case the tag # has been tampered with for ls_uuid in ls_uuids: # If the lswitch has been deleted, get backup copy of data lswitch = (self._nsx_cache[ls_uuid].get('data') or self._nsx_cache[ls_uuid].get('data_bk')) tags = self._get_tag_dict(lswitch['tags']) neutron_id = tags.get('quantum_net_id') neutron_net_ids.add(neutron_id) neutron_nsx_mappings[neutron_id] = ( 
neutron_nsx_mappings.get(neutron_id, []) + [self._nsx_cache[ls_uuid]]) # Fetch neutron networks from database filters = {'router:external': [False]} if not scan_missing: filters['id'] = neutron_net_ids networks = model_query.get_collection( ctx, models_v2.Network, self._plugin._make_network_dict, filters=filters) for network in networks: lswitches = neutron_nsx_mappings.get(network['id'], []) lswitches = [lsw.get('data') for lsw in lswitches] self.synchronize_network(ctx, network, lswitches) def synchronize_router(self, context, neutron_router_data, lrouter=None): """Synchronize a neutron router with its NSX counterpart.""" if not lrouter: # Try to get router from nsx try: # This query will return the logical router status too nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self._cluster, neutron_router_data['id']) if nsx_router_id: lrouter = routerlib.get_lrouter( self._cluster, nsx_router_id) except exceptions.NotFound: # NOTE(salv-orlando): We should be catching # api_exc.ResourceNotFound here # The logical router was not found LOG.warning("Logical router for neutron router %s not " "found on NSX.", neutron_router_data['id']) if lrouter: # Update the cache self._nsx_cache.update_lrouter(lrouter) # Note(salv-orlando): It might worth adding a check to verify neutron # resource tag in nsx entity matches a Neutron id. 
# By default assume things go wrong status = constants.NET_STATUS_ERROR if lrouter: lr_status = (lrouter['_relations'] ['LogicalRouterStatus'] ['fabric_status']) status = (lr_status and constants.NET_STATUS_ACTIVE or constants.NET_STATUS_DOWN) # Update db object if status == neutron_router_data['status']: # do nothing return with db_api.context_manager.writer.using(context): try: router = self._plugin._get_router(context, neutron_router_data['id']) except l3_exc.RouterNotFound: pass else: router.status = status LOG.debug("Updating status for neutron resource %(q_id)s to:" " %(status)s", {'q_id': neutron_router_data['id'], 'status': status}) def _synchronize_lrouters(self, ctx, lr_uuids, scan_missing=False): if not lr_uuids and not scan_missing: return # TODO(salvatore-orlando): Deal with the case the tag # has been tampered with neutron_router_mappings = {} for lr_uuid in lr_uuids: lrouter = (self._nsx_cache[lr_uuid].get('data') or self._nsx_cache[lr_uuid].get('data_bk')) tags = self._get_tag_dict(lrouter['tags']) neutron_router_id = tags.get('q_router_id') if neutron_router_id: neutron_router_mappings[neutron_router_id] = ( self._nsx_cache[lr_uuid]) else: LOG.warning("Unable to find Neutron router id for " "NSX logical router: %s", lr_uuid) # Fetch neutron routers from database filters = ({} if scan_missing else {'id': neutron_router_mappings.keys()}) routers = model_query.get_collection( ctx, l3_db.Router, self._plugin._make_router_dict, filters=filters) for router in routers: lrouter = neutron_router_mappings.get(router['id']) self.synchronize_router( ctx, router, lrouter and lrouter.get('data')) def synchronize_port(self, context, neutron_port_data, lswitchport=None, ext_networks=None): """Synchronize a Neutron port with its NSX counterpart.""" # Skip synchronization for ports on external networks if not ext_networks: ext_networks = [net['id'] for net in context.session.query( models_v2.Network).join( external_net_db.ExternalNetwork, (models_v2.Network.id == 
external_net_db.ExternalNetwork.network_id))] if neutron_port_data['network_id'] in ext_networks: with db_api.context_manager.writer.using(context): neutron_port_data['status'] = constants.PORT_STATUS_ACTIVE return if not lswitchport: # Try to get port from nsx try: ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id( context.session, self._cluster, neutron_port_data['id']) if lp_uuid: lswitchport = switchlib.get_port( self._cluster, ls_uuid, lp_uuid, relations='LogicalPortStatus') except (exceptions.PortNotFoundOnNetwork): # NOTE(salv-orlando): We should be catching # api_exc.ResourceNotFound here instead # of PortNotFoundOnNetwork when the id exists but # the logical switch port was not found LOG.warning("Logical switch port for neutron port %s " "not found on NSX.", neutron_port_data['id']) lswitchport = None else: # If lswitchport is not None, update the cache. # It could be none if the port was deleted from the backend if lswitchport: self._nsx_cache.update_lswitchport(lswitchport) # Note(salv-orlando): It might worth adding a check to verify neutron # resource tag in nsx entity matches Neutron id. 
# By default assume things go wrong status = constants.PORT_STATUS_ERROR if lswitchport: lp_status = (lswitchport['_relations'] ['LogicalPortStatus'] ['fabric_status_up']) status = (lp_status and constants.PORT_STATUS_ACTIVE or constants.PORT_STATUS_DOWN) # Update db object if status == neutron_port_data['status']: # do nothing return with db_api.context_manager.writer.using(context): try: port = self._plugin._get_port(context, neutron_port_data['id']) except exceptions.PortNotFound: pass else: port.status = status LOG.debug("Updating status for neutron resource %(q_id)s to:" " %(status)s", {'q_id': neutron_port_data['id'], 'status': status}) def _synchronize_lswitchports(self, ctx, lp_uuids, scan_missing=False): if not lp_uuids and not scan_missing: return # Find Neutron port id by tag - the tag is already # loaded in memory, no reason for doing a db query # TODO(salvatore-orlando): Deal with the case the tag # has been tampered with neutron_port_mappings = {} for lp_uuid in lp_uuids: lport = (self._nsx_cache[lp_uuid].get('data') or self._nsx_cache[lp_uuid].get('data_bk')) tags = self._get_tag_dict(lport['tags']) neutron_port_id = tags.get('q_port_id') if neutron_port_id: neutron_port_mappings[neutron_port_id] = ( self._nsx_cache[lp_uuid]) # Fetch neutron ports from database # At the first sync we need to fetch all ports filters = ({} if scan_missing else {'id': neutron_port_mappings.keys()}) # TODO(salv-orlando): Work out a solution for avoiding # this query ext_nets = [net['id'] for net in ctx.session.query( models_v2.Network).join( external_net_db.ExternalNetwork, (models_v2.Network.id == external_net_db.ExternalNetwork.network_id))] ports = model_query.get_collection( ctx, models_v2.Port, self._plugin._make_port_dict, filters=filters) for port in ports: lswitchport = neutron_port_mappings.get(port['id']) self.synchronize_port( ctx, port, lswitchport and lswitchport.get('data'), ext_networks=ext_nets) def _get_chunk_size(self, sp): # NOTE(salv-orlando): Try to 
use __future__ for this routine only? ratio = ((float(sp.total_size) / float(sp.chunk_size)) / (float(self._sync_interval) / float(self._req_delay))) new_size = max(1.0, ratio) * float(sp.chunk_size) return int(new_size) + (new_size - int(new_size) > 0) def _fetch_data(self, uri, cursor, page_size): # If not cursor there is nothing to retrieve if cursor: if cursor == 'start': cursor = None # Chunk size tuning might, in some conditions, make it larger # than 5,000, which is the maximum page size allowed by the NSX # API. In this case the request should be split in multiple # requests. This is not ideal, and therefore a log warning will # be emitted. num_requests = page_size // (MAX_PAGE_SIZE + 1) + 1 if num_requests > 1: LOG.warning("Requested page size is %(cur_chunk_size)d. " "It might be necessary to do %(num_requests)d " "round-trips to NSX for fetching data. Please " "tune sync parameters to ensure chunk size " "is less than %(max_page_size)d", {'cur_chunk_size': page_size, 'num_requests': num_requests, 'max_page_size': MAX_PAGE_SIZE}) # Only the first request might return the total size, # subsequent requests will definitely not results, cursor, total_size = nsxlib.get_single_query_page( uri, self._cluster, cursor, min(page_size, MAX_PAGE_SIZE)) for _req in range(num_requests - 1): # If no cursor is returned break the cycle as there is no # actual need to perform multiple requests (all fetched) # This happens when the overall size of resources exceeds # the maximum page size, but the number for each single # resource type is below this threshold if not cursor: break req_results, cursor = nsxlib.get_single_query_page( uri, self._cluster, cursor, min(page_size, MAX_PAGE_SIZE))[:2] results.extend(req_results) # reset cursor before returning if we queried just to # know the number of entities return results, cursor if page_size else 'start', total_size return [], cursor, None def _fetch_nsx_data_chunk(self, sp): base_chunk_size = sp.chunk_size chunk_size = 
base_chunk_size + sp.extra_chunk_size LOG.info("Fetching up to %s resources " "from NSX backend", chunk_size) fetched = ls_count = lr_count = lp_count = 0 lswitches = lrouters = lswitchports = [] if sp.ls_cursor or sp.ls_cursor == 'start': (lswitches, sp.ls_cursor, ls_count) = self._fetch_data( self.LS_URI, sp.ls_cursor, chunk_size) fetched = len(lswitches) if fetched < chunk_size and sp.lr_cursor or sp.lr_cursor == 'start': (lrouters, sp.lr_cursor, lr_count) = self._fetch_data( self.LR_URI, sp.lr_cursor, max(chunk_size - fetched, 0)) fetched += len(lrouters) if fetched < chunk_size and sp.lp_cursor or sp.lp_cursor == 'start': (lswitchports, sp.lp_cursor, lp_count) = self._fetch_data( self.LP_URI, sp.lp_cursor, max(chunk_size - fetched, 0)) fetched += len(lswitchports) if sp.current_chunk == 0: # No cursors were provided. Then it must be possible to # calculate the total amount of data to fetch sp.total_size = ls_count + lr_count + lp_count LOG.debug("Total data size: %d", sp.total_size) sp.chunk_size = self._get_chunk_size(sp) # Calculate chunk size adjustment sp.extra_chunk_size = sp.chunk_size - base_chunk_size LOG.debug("Fetched %(num_lswitches)d logical switches, " "%(num_lswitchports)d logical switch ports," "%(num_lrouters)d logical routers", {'num_lswitches': len(lswitches), 'num_lswitchports': len(lswitchports), 'num_lrouters': len(lrouters)}) return (lswitches, lrouters, lswitchports) def _synchronize_state(self, sp): # If the plugin has been destroyed, stop the LoopingCall if not self._plugin: raise loopingcall.LoopingCallDone() start = timeutils.utcnow() # Reset page cursor variables if necessary if sp.current_chunk == 0: sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start' LOG.info("Running state synchronization task. 
Chunk: %s", sp.current_chunk) # Fetch chunk_size data from NSX try: (lswitches, lrouters, lswitchports) = ( self._fetch_nsx_data_chunk(sp)) except (api_exc.RequestTimeout, api_exc.NsxApiException): sleep_interval = self._sync_backoff # Cap max back off to 64 seconds self._sync_backoff = min(self._sync_backoff * 2, 64) LOG.exception("An error occurred while communicating with " "NSX backend. Will retry synchronization " "in %d seconds", sleep_interval) return sleep_interval LOG.debug("Time elapsed querying NSX: %s", timeutils.utcnow() - start) if sp.total_size: num_chunks = ((sp.total_size / sp.chunk_size) + (sp.total_size % sp.chunk_size != 0)) else: num_chunks = 1 LOG.debug("Number of chunks: %d", num_chunks) # Find objects which have changed on NSX side and need # to be synchronized LOG.debug("Processing NSX cache for updated objects") (ls_uuids, lr_uuids, lp_uuids) = self._nsx_cache.process_updates( lswitches, lrouters, lswitchports) # Process removed objects only at the last chunk scan_missing = (sp.current_chunk == num_chunks - 1 and not sp.init_sync_performed) if sp.current_chunk == num_chunks - 1: LOG.debug("Processing NSX cache for deleted objects") self._nsx_cache.process_deletes() ls_uuids = self._nsx_cache.get_lswitches( changed_only=not scan_missing) lr_uuids = self._nsx_cache.get_lrouters( changed_only=not scan_missing) lp_uuids = self._nsx_cache.get_lswitchports( changed_only=not scan_missing) LOG.debug("Time elapsed hashing data: %s", timeutils.utcnow() - start) # Get an admin context ctx = n_context.get_admin_context() # Synchronize with database self._synchronize_lswitches(ctx, ls_uuids, scan_missing=scan_missing) self._synchronize_lrouters(ctx, lr_uuids, scan_missing=scan_missing) self._synchronize_lswitchports(ctx, lp_uuids, scan_missing=scan_missing) # Increase chunk counter LOG.info("Synchronization for chunk %(chunk_num)d of " "%(total_chunks)d performed", {'chunk_num': sp.current_chunk + 1, 'total_chunks': num_chunks}) sp.current_chunk = 
(sp.current_chunk + 1) % num_chunks added_delay = 0 if sp.current_chunk == 0: # Ensure init_sync_performed is True if not sp.init_sync_performed: sp.init_sync_performed = True # Add additional random delay added_delay = random.randint(0, self._max_rand_delay) LOG.debug("Time elapsed at end of sync: %s", timeutils.utcnow() - start) return self._sync_interval / num_chunks + added_delay vmware-nsx-12.0.1/vmware_nsx/common/driver_api.py0000666000175100017510000001506213244523345022120 0ustar zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six @six.add_metaclass(abc.ABCMeta) class ExtensionDriver(object): """Define stable abstract interface for extension drivers. An extension driver extends the core resources implemented by the plugin with additional attributes. Methods that process create and update operations for these resources validate and persist values for extended attributes supplied through the API. Other methods extend the resource dictionaries returned from the API operations with the values of the extended attributes. """ @abc.abstractmethod def initialize(self): """Perform driver initialization. Called after all drivers have been loaded and the database has been initialized. No abstract methods defined below will be called prior to this method being called. """ pass @property def extension_alias(self): """Supported extension alias. 
Return the alias identifying the core API extension supported by this driver. Do not declare if API extension handling will be left to a service plugin, and we just need to provide core resource extension and updates. """ pass def process_create_network(self, plugin_context, data, result): """Process extended attributes for create network. :param plugin_context: plugin request context :param data: dictionary of incoming network data :param result: network dictionary to extend Called inside transaction context on plugin_context.session to validate and persist any extended network attributes defined by this driver. Extended attribute values must also be added to result. """ pass def process_create_subnet(self, plugin_context, data, result): """Process extended attributes for create subnet. :param plugin_context: plugin request context :param data: dictionary of incoming subnet data :param result: subnet dictionary to extend Called inside transaction context on plugin_context.session to validate and persist any extended subnet attributes defined by this driver. Extended attribute values must also be added to result. """ pass def process_create_port(self, plugin_context, data, result): """Process extended attributes for create port. :param plugin_context: plugin request context :param data: dictionary of incoming port data :param result: port dictionary to extend Called inside transaction context on plugin_context.session to validate and persist any extended port attributes defined by this driver. Extended attribute values must also be added to result. """ pass def process_update_network(self, plugin_context, data, result): """Process extended attributes for update network. :param plugin_context: plugin request context :param data: dictionary of incoming network data :param result: network dictionary to extend Called inside transaction context on plugin_context.session to validate and update any extended network attributes defined by this driver. 
Extended attribute values, whether updated or not, must also be added to result. """ pass def process_update_subnet(self, plugin_context, data, result): """Process extended attributes for update subnet. :param plugin_context: plugin request context :param data: dictionary of incoming subnet data :param result: subnet dictionary to extend Called inside transaction context on plugin_context.session to validate and update any extended subnet attributes defined by this driver. Extended attribute values, whether updated or not, must also be added to result. """ pass def process_update_port(self, plugin_context, data, result): """Process extended attributes for update port. :param plugin_context: plugin request context :param data: dictionary of incoming port data :param result: port dictionary to extend Called inside transaction context on plugin_context.session to validate and update any extended port attributes defined by this driver. Extended attribute values, whether updated or not, must also be added to result. """ pass def extend_network_dict(self, session, base_model, result): """Add extended attributes to network dictionary. :param session: database session :param base_model: network model data :param result: network dictionary to extend Called inside transaction context on session to add any extended attributes defined by this driver to a network dictionary to be used for driver calls and/or returned as the result of a network operation. """ pass def extend_subnet_dict(self, session, base_model, result): """Add extended attributes to subnet dictionary. :param session: database session :param base_model: subnet model data :param result: subnet dictionary to extend Called inside transaction context on session to add any extended attributes defined by this driver to a subnet dictionary to be used for driver calls and/or returned as the result of a subnet operation. 
""" pass def extend_port_dict(self, session, base_model, result): """Add extended attributes to port dictionary. :param session: database session :param base_model: port model data :param result: port dictionary to extend Called inside transaction context on session to add any extended attributes defined by this driver to a port dictionary to be used for driver calls and/or returned as the result of a port operation. """ pass vmware-nsx-12.0.1/vmware_nsx/common/nsx_constants.py0000666000175100017510000000133613244523345022677 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # L2 agent vif type VIF_TYPE_DVS = 'dvs' # NSXv3 CORE PLUGIN PATH VMWARE_NSX_V3_PLUGIN_NAME = 'vmware_nsxv3' vmware-nsx-12.0.1/vmware_nsx/common/nsxv_constants.py0000666000175100017510000000471213244523345023066 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Edge size COMPACT = 'compact' LARGE = 'large' XLARGE = 'xlarge' QUADLARGE = 'quadlarge' SHARED = "shared" EXCLUSIVE = "exclusive" # Edge type SERVICE_EDGE = 'service' VDR_EDGE = 'vdr' # Internal element purpose INTER_EDGE_PURPOSE = 'inter_edge_net' # etc INTERNAL_TENANT_ID = 'metadata_internal_project' # L2 gateway edge name prefix L2_GATEWAY_EDGE = 'L2 bridging' # An artificial limit for router name length - subtract 1 for the - separator ROUTER_NAME_LENGTH = (78 - 1) # LoadBalancer Certificate constants #NOTE(abhiraut): Number of days specify the total number of days for which the # certificate will be active. This certificate will expire in # 10 years. Once the backend API allows creation of certs which # do not expire, the following constant should be removed. CERT_NUMBER_OF_DAYS = 3650 CSR_REQUEST = ("" "CNmetadata.nsx.local" "" "OOrganization" "OUUnit" "LLocality" "STState" "CUS" "RSA2048" "") # Reserved IPs that cannot overlap defined subnets RESERVED_IPS = ["169.254.128.0/17", "169.254.1.0/24", "169.254.64.192/26"] # VPNaaS constants ENCRYPTION_ALGORITHM_MAP = { '3des': '3des', 'aes-128': 'aes', 'aes-256': 'aes256' } PFS_MAP = { 'group2': 'dh2', 'group5': 'dh5' } TRANSFORM_PROTOCOL_ALLOWED = ('esp',) ENCAPSULATION_MODE_ALLOWED = ('tunnel',) vmware-nsx-12.0.1/vmware_nsx/common/__init__.py0000666000175100017510000000000013244523345021515 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/common/managers.py0000666000175100017510000001350713244523345021573 0ustar zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from oslo_utils import excutils import stevedore LOG = log.getLogger(__name__) class ExtensionManager(stevedore.named.NamedExtensionManager): """Manage extension drivers using drivers.""" def __init__(self, extension_drivers=None): # Ordered list of extension drivers, defining # the order in which the drivers are called. self.ordered_ext_drivers = [] if extension_drivers is None: extension_drivers = cfg.CONF.nsx_extension_drivers LOG.info("Configured extension driver names: %s", extension_drivers) super(ExtensionManager, self).__init__('vmware_nsx.extension_drivers', extension_drivers, invoke_on_load=True, name_order=True) LOG.info("Loaded extension driver names: %s", self.names()) self._register_drivers() def _register_drivers(self): """Register all extension drivers. This method should only be called once in the ExtensionManager constructor. """ for ext in self: self.ordered_ext_drivers.append(ext) LOG.info("Registered extension drivers: %s", [driver.name for driver in self.ordered_ext_drivers]) def initialize(self): # Initialize each driver in the list. 
for driver in self.ordered_ext_drivers: LOG.info("Initializing extension driver '%s'", driver.name) driver.obj.initialize() def extension_aliases(self): exts = [] for driver in self.ordered_ext_drivers: alias = driver.obj.extension_alias if alias: exts.append(alias) LOG.info("Got %(alias)s extension from driver '%(drv)s'", {'alias': alias, 'drv': driver.name}) return exts def _call_on_ext_drivers(self, method_name, plugin_context, data, result): """Helper method for calling a method across all extension drivers.""" for driver in self.ordered_ext_drivers: try: getattr(driver.obj, method_name)(plugin_context, data, result) except Exception: with excutils.save_and_reraise_exception(): LOG.info("Extension driver '%(name)s' failed in " "%(method)s", {'name': driver.name, 'method': method_name}) def process_create_network(self, plugin_context, data, result): """Notify all extension drivers during network creation.""" self._call_on_ext_drivers("process_create_network", plugin_context, data, result) def process_update_network(self, plugin_context, data, result): """Notify all extension drivers during network update.""" self._call_on_ext_drivers("process_update_network", plugin_context, data, result) def process_create_subnet(self, plugin_context, data, result): """Notify all extension drivers during subnet creation.""" self._call_on_ext_drivers("process_create_subnet", plugin_context, data, result) def process_update_subnet(self, plugin_context, data, result): """Notify all extension drivers during subnet update.""" self._call_on_ext_drivers("process_update_subnet", plugin_context, data, result) def process_create_port(self, plugin_context, data, result): """Notify all extension drivers during port creation.""" self._call_on_ext_drivers("process_create_port", plugin_context, data, result) def process_update_port(self, plugin_context, data, result): """Notify all extension drivers during port update.""" self._call_on_ext_drivers("process_update_port", plugin_context, data, 
result) def _call_on_dict_driver(self, method_name, session, base_model, result): for driver in self.ordered_ext_drivers: try: getattr(driver.obj, method_name)(session, base_model, result) except Exception: LOG.error("Extension driver '%(name)s' failed in " "%(method)s", {'name': driver.name, 'method': method_name}) raise def extend_network_dict(self, session, base_model, result): """Notify all extension drivers to extend network dictionary.""" self._call_on_dict_driver("extend_network_dict", session, base_model, result) def extend_subnet_dict(self, session, base_model, result): """Notify all extension drivers to extend subnet dictionary.""" self._call_on_dict_driver("extend_subnet_dict", session, base_model, result) def extend_port_dict(self, session, base_model, result): """Notify all extension drivers to extend port dictionary.""" self._call_on_dict_driver("extend_port_dict", session, base_model, result) vmware-nsx-12.0.1/vmware_nsx/common/nsx_utils.py0000666000175100017510000003204113244523345022020 0ustar zuulzuul00000000000000# Copyright 2013 VMware Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api import validators from neutron_lib import constants from neutron_lib import exceptions as n_exc from oslo_log import log import six from vmware_nsx.api_client import client from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import utils as vmw_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.db import networkgw_db from vmware_nsx import nsx_cluster from vmware_nsx.nsxlib.mh import l2gateway as l2gwlib from vmware_nsx.nsxlib.mh import router as routerlib from vmware_nsx.nsxlib.mh import secgroup as secgrouplib from vmware_nsx.nsxlib.mh import switch as switchlib LOG = log.getLogger(__name__) def fetch_nsx_switches(session, cluster, neutron_net_id): """Retrieve logical switches for a neutron network. This function is optimized for fetching all the lswitches always with a single NSX query. If there is more than 1 logical switch (chained switches use case) NSX lswitches are queried by 'quantum_net_id' tag. Otherwise the NSX lswitch is directly retrieved by id (more efficient). """ nsx_switch_ids = get_nsx_switch_ids(session, cluster, neutron_net_id) if len(nsx_switch_ids) > 1: lswitches = switchlib.get_lswitches(cluster, neutron_net_id) else: lswitches = [switchlib.get_lswitch_by_id( cluster, nsx_switch_ids[0])] return lswitches def get_nsx_switch_ids(session, cluster, neutron_network_id): """Return the NSX switch id for a given neutron network. First lookup for mappings in Neutron database. If no mapping is found, query the NSX backend and add the mappings. """ nsx_switch_ids = nsx_db.get_nsx_switch_ids( session, neutron_network_id) if not nsx_switch_ids: # Find logical switches from backend. 
# This is a rather expensive query, but it won't be executed # more than once for each network in Neutron's lifetime nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id) if not nsx_switches: LOG.warning("Unable to find NSX switches for Neutron network " "%s", neutron_network_id) return nsx_switch_ids = [] with session.begin(subtransactions=True): for nsx_switch in nsx_switches: nsx_switch_id = nsx_switch['uuid'] nsx_switch_ids.append(nsx_switch_id) # Create DB mapping nsx_db.add_neutron_nsx_network_mapping( session, neutron_network_id, nsx_switch_id) return nsx_switch_ids def get_nsx_switch_and_port_id(session, cluster, neutron_port_id): """Return the NSX switch and port uuids for a given neutron port. First, look up the Neutron database. If not found, execute a query on NSX platform as the mapping might be missing because the port was created before upgrading to grizzly. This routine also retrieves the identifier of the logical switch in the backend where the port is plugged. Prior to Icehouse this information was not available in the Neutron Database. For dealing with pre-existing records, this routine will query the backend for retrieving the correct switch identifier. As of Icehouse release it is not indeed anymore possible to assume the backend logical switch identifier is equal to the neutron network identifier. 
""" nsx_switch_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id( session, neutron_port_id) if not nsx_switch_id: # Find logical switch for port from backend # This is a rather expensive query, but it won't be executed # more than once for each port in Neutron's lifetime nsx_ports = switchlib.query_lswitch_lports( cluster, '*', relations='LogicalSwitchConfig', filters={'tag': neutron_port_id, 'tag_scope': 'q_port_id'}) # Only one result expected # NOTE(salv-orlando): Not handling the case where more than one # port is found with the same neutron port tag if not nsx_ports: LOG.warning("Unable to find NSX port for Neutron port %s", neutron_port_id) # This method is supposed to return a tuple return None, None nsx_port = nsx_ports[0] nsx_switch_id = (nsx_port['_relations'] ['LogicalSwitchConfig']['uuid']) if nsx_port_id: # Mapping already exists. Delete before recreating nsx_db.delete_neutron_nsx_port_mapping( session, neutron_port_id) else: nsx_port_id = nsx_port['uuid'] # (re)Create DB mapping nsx_db.add_neutron_nsx_port_mapping( session, neutron_port_id, nsx_switch_id, nsx_port_id) return nsx_switch_id, nsx_port_id def get_nsx_security_group_id(session, cluster, neutron_id): """Return the NSX sec profile uuid for a given neutron sec group. First, look up the Neutron database. If not found, execute a query on NSX platform as the mapping might be missing. NOTE: Security groups are called 'security profiles' on the NSX backend. """ nsx_id = nsx_db.get_nsx_security_group_id(session, neutron_id) if not nsx_id: # Find security profile on backend. 
# This is a rather expensive query, but it won't be executed # more than once for each security group in Neutron's lifetime nsx_sec_profiles = secgrouplib.query_security_profiles( cluster, '*', filters={'tag': neutron_id, 'tag_scope': 'q_sec_group_id'}) # Only one result expected # NOTE(salv-orlando): Not handling the case where more than one # security profile is found with the same neutron port tag if not nsx_sec_profiles: LOG.warning("Unable to find NSX security profile for Neutron " "security group %s", neutron_id) return elif len(nsx_sec_profiles) > 1: LOG.warning("Multiple NSX security profiles found for Neutron " "security group %s", neutron_id) nsx_sec_profile = nsx_sec_profiles[0] nsx_id = nsx_sec_profile['uuid'] with session.begin(subtransactions=True): # Create DB mapping nsx_db.add_neutron_nsx_security_group_mapping( session, neutron_id, nsx_id) return nsx_id def get_nsx_router_id(session, cluster, neutron_router_id): """Return the NSX router uuid for a given neutron router. First, look up the Neutron database. If not found, execute a query on NSX platform as the mapping might be missing. """ if not neutron_router_id: return nsx_router_id = nsx_db.get_nsx_router_id( session, neutron_router_id) if not nsx_router_id: # Find logical router from backend. 
# This is a rather expensive query, but it won't be executed # more than once for each router in Neutron's lifetime nsx_routers = routerlib.query_lrouters( cluster, '*', filters={'tag': neutron_router_id, 'tag_scope': 'q_router_id'}) # Only one result expected # NOTE(salv-orlando): Not handling the case where more than one # port is found with the same neutron port tag if not nsx_routers: LOG.warning("Unable to find NSX router for Neutron router %s", neutron_router_id) return nsx_router = nsx_routers[0] nsx_router_id = nsx_router['uuid'] with session.begin(subtransactions=True): # Create DB mapping nsx_db.add_neutron_nsx_router_mapping( session, neutron_router_id, nsx_router_id) return nsx_router_id def create_nsx_cluster(cluster_opts, concurrent_connections, gen_timeout): cluster = nsx_cluster.NSXCluster(**cluster_opts) def _ctrl_split(x, y): return (x, int(y), True) api_providers = [_ctrl_split(*ctrl.split(':')) for ctrl in cluster.nsx_controllers] cluster.api_client = client.NsxApiClient( api_providers, cluster.nsx_user, cluster.nsx_password, http_timeout=cluster.http_timeout, retries=cluster.retries, redirects=cluster.redirects, concurrent_connections=concurrent_connections, gen_timeout=gen_timeout) return cluster def get_nsx_device_status(cluster, nsx_uuid): try: status_up = l2gwlib.get_gateway_device_status( cluster, nsx_uuid) if status_up: return networkgw_db.STATUS_ACTIVE else: return networkgw_db.STATUS_DOWN except api_exc.NsxApiException: return networkgw_db.STATUS_UNKNOWN except n_exc.NotFound: return networkgw_db.ERROR def get_nsx_device_statuses(cluster, tenant_id): try: status_dict = l2gwlib.get_gateway_devices_status( cluster, tenant_id) return dict((nsx_device_id, networkgw_db.STATUS_ACTIVE if connected else networkgw_db.STATUS_DOWN) for (nsx_device_id, connected) in six.iteritems(status_dict)) except api_exc.NsxApiException: # Do not make a NSX API exception fatal if tenant_id: LOG.warning("Unable to retrieve operational status for " "gateway 
devices belonging to tenant: %s", tenant_id) else: LOG.warning("Unable to retrieve operational status for " "gateway devices") def _convert_bindings_to_nsx_transport_zones(bindings): nsx_transport_zones_config = [] for binding in bindings: transport_entry = {} if binding.binding_type in [vmw_utils.NetworkTypes.FLAT, vmw_utils.NetworkTypes.VLAN]: transport_entry['transport_type'] = ( vmw_utils.NetworkTypes.BRIDGE) transport_entry['binding_config'] = {} vlan_id = binding.vlan_id if vlan_id: transport_entry['binding_config'] = ( {'vlan_translation': [{'transport': vlan_id}]}) else: transport_entry['transport_type'] = binding.binding_type transport_entry['zone_uuid'] = binding.phy_uuid nsx_transport_zones_config.append(transport_entry) return nsx_transport_zones_config def _convert_segments_to_nsx_transport_zones(segments, default_tz_uuid): nsx_transport_zones_config = [] for transport_zone in segments: for value in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID]: if transport_zone.get(value) == constants.ATTR_NOT_SPECIFIED: transport_zone[value] = None transport_entry = {} transport_type = transport_zone.get(pnet.NETWORK_TYPE) if transport_type in [vmw_utils.NetworkTypes.FLAT, vmw_utils.NetworkTypes.VLAN]: transport_entry['transport_type'] = ( vmw_utils.NetworkTypes.BRIDGE) transport_entry['binding_config'] = {} vlan_id = transport_zone.get(pnet.SEGMENTATION_ID) if vlan_id: transport_entry['binding_config'] = ( {'vlan_translation': [{'transport': vlan_id}]}) else: transport_entry['transport_type'] = transport_type transport_entry['zone_uuid'] = ( transport_zone[pnet.PHYSICAL_NETWORK] or default_tz_uuid) nsx_transport_zones_config.append(transport_entry) return nsx_transport_zones_config def convert_to_nsx_transport_zones( default_tz_uuid, network=None, bindings=None, default_transport_type=None): # Convert fields from provider request to nsx format if (network and not validators.is_attr_set( network.get(mpnet_apidef.SEGMENTS))): return [{"zone_uuid": 
default_tz_uuid, "transport_type": default_transport_type}] # Convert fields from db to nsx format if bindings: return _convert_bindings_to_nsx_transport_zones(bindings) # If we end up here we need to convert multiprovider segments into nsx # transport zone configurations return _convert_segments_to_nsx_transport_zones( network.get(mpnet_apidef.SEGMENTS), default_tz_uuid) vmware-nsx-12.0.1/vmware_nsx/common/locking.py0000666000175100017510000000476413244523345021431 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import traceback from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log from tooz import coordination LOG = log.getLogger(__name__) class LockManager(object): _coordinator = None _coordinator_pid = None _connect_string = cfg.CONF.locking_coordinator_url def __init__(self): LOG.debug('LockManager initialized!') @staticmethod def get_lock(name, **kwargs): if cfg.CONF.locking_coordinator_url: lck = LockManager._get_lock_distributed(name) LOG.debug('Lock %s taken with stack trace %s', name, traceback.extract_stack()) return lck else: # Ensure that external=True kwargs['external'] = True lck = LockManager._get_lock_local(name, **kwargs) LOG.debug('Lock %s taken with stack trace %s', name, traceback.extract_stack()) return lck @staticmethod def _get_lock_local(name, **kwargs): return lockutils.lock(name, **kwargs) @staticmethod def _get_lock_distributed(name): if LockManager._coordinator_pid != os.getpid(): # We should use a per-process coordinator. If PID is different # start a new coordinator. # While the API workers are spawned, we have to re-initialize # a coordinator, so we validate that the PID is still the same. LockManager._coordinator_pid = os.getpid() LOG.debug('Initialized coordinator with connect string %s', LockManager._connect_string) LockManager._coordinator = coordination.get_coordinator( LockManager._connect_string, 'vmware-neutron-plugin') LockManager._coordinator.start() LOG.debug('Retrieved lock for %s', name) return LockManager._coordinator.get_lock(name) vmware-nsx-12.0.1/vmware_nsx/common/securitygroups.py0000666000175100017510000001214613244523345023103 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log import six from vmware_nsx.common import nsx_utils LOG = log.getLogger(__name__) # Protocol number look up for supported protocols protocol_num_look_up = {'tcp': 6, 'icmp': 1, 'udp': 17, 'ipv6-icmp': 58} def _convert_to_nsx_rule(session, cluster, rule, with_id=False): """Converts a Neutron security group rule to the NSX format. This routine also replaces Neutron IDs with NSX UUIDs. """ nsx_rule = {} params = ['remote_ip_prefix', 'protocol', 'remote_group_id', 'port_range_min', 'port_range_max', 'ethertype'] if with_id: params.append('id') for param in params: value = rule.get(param) if param not in rule: nsx_rule[param] = value elif not value: pass elif param == 'remote_ip_prefix': nsx_rule['ip_prefix'] = rule['remote_ip_prefix'] elif param == 'remote_group_id': nsx_rule['profile_uuid'] = nsx_utils.get_nsx_security_group_id( session, cluster, rule['remote_group_id']) elif param == 'protocol': try: nsx_rule['protocol'] = int(rule['protocol']) except (ValueError, TypeError): nsx_rule['protocol'] = ( protocol_num_look_up[rule['protocol']]) else: nsx_rule[param] = value return nsx_rule def _convert_to_nsx_rules(session, cluster, rules, with_id=False): """Converts a list of Neutron security group rules to the NSX format.""" nsx_rules = {'logical_port_ingress_rules': [], 'logical_port_egress_rules': []} for direction in ['logical_port_ingress_rules', 'logical_port_egress_rules']: for rule in rules[direction]: nsx_rules[direction].append( _convert_to_nsx_rule(session, cluster, rule, with_id)) return nsx_rules def 
get_security_group_rules_nsx_format(session, cluster, security_group_rules, with_id=False): """Convert neutron security group rules into NSX format. This routine splits Neutron security group rules into two lists, one for ingress rules and the other for egress rules. """ def fields(rule): _fields = ['remote_ip_prefix', 'remote_group_id', 'protocol', 'port_range_min', 'port_range_max', 'protocol', 'ethertype'] if with_id: _fields.append('id') return dict((k, v) for k, v in six.iteritems(rule) if k in _fields) ingress_rules = [] egress_rules = [] for rule in security_group_rules: if rule.get('souce_group_id'): rule['remote_group_id'] = nsx_utils.get_nsx_security_group_id( session, cluster, rule['remote_group_id']) if rule['direction'] == 'ingress': ingress_rules.append(fields(rule)) elif rule['direction'] == 'egress': egress_rules.append(fields(rule)) rules = {'logical_port_ingress_rules': egress_rules, 'logical_port_egress_rules': ingress_rules} return _convert_to_nsx_rules(session, cluster, rules, with_id) def merge_security_group_rules_with_current(session, cluster, new_rules, current_rules): merged_rules = get_security_group_rules_nsx_format( session, cluster, current_rules) for new_rule in new_rules: rule = new_rule['security_group_rule'] if rule['direction'] == 'ingress': merged_rules['logical_port_egress_rules'].append( _convert_to_nsx_rule(session, cluster, rule)) elif rule['direction'] == 'egress': merged_rules['logical_port_ingress_rules'].append( _convert_to_nsx_rule(session, cluster, rule)) return merged_rules def remove_security_group_with_id_and_id_field(rules, rule_id): """Remove rule by rule_id. This function receives all of the current rule associated with a security group and then removes the rule that matches the rule_id. In addition it removes the id field in the dict with each rule since that should not be passed to nsx. 
""" for rule_direction in rules.values(): item_to_remove = None for port_rule in rule_direction: if port_rule['id'] == rule_id: item_to_remove = port_rule else: # remove key from dictionary for NSX del port_rule['id'] if item_to_remove: rule_direction.remove(item_to_remove) vmware-nsx-12.0.1/vmware_nsx/common/exceptions.py0000666000175100017510000001564113244523345022160 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from vmware_nsx._i18n import _ class NsxPluginException(n_exc.NeutronException): message = _("An unexpected error occurred in the NSX Plugin: %(err_msg)s") class ClientCertificateException(NsxPluginException): message = _("Client certificate error: %(err_msg)s") class InvalidVersion(NsxPluginException): message = _("Unable to fulfill request with version %(version)s.") class InvalidConnection(NsxPluginException): message = _("Invalid NSX connection parameters: %(conn_params)s") class InvalidClusterConfiguration(NsxPluginException): message = _("Invalid cluster values: %(invalid_attrs)s. Please ensure " "that these values are specified in the [DEFAULT] " "section of the NSX plugin ini file.") class InvalidNovaZone(NsxPluginException): message = _("Unable to find cluster config entry " "for nova zone: %(nova_zone)s") class NoMorePortsException(NsxPluginException): message = _("Unable to create port on network %(network)s. 
" "Maximum number of ports reached") class NatRuleMismatch(NsxPluginException): message = _("While retrieving NAT rules, %(actual_rules)s were found " "whereas rules in the (%(min_rules)s,%(max_rules)s) interval " "were expected") class InvalidAttachmentType(NsxPluginException): message = _("Invalid NSX attachment type '%(attachment_type)s'") class MaintenanceInProgress(NsxPluginException): message = _("The networking backend is currently in maintenance mode and " "therefore unable to accept requests which modify its state. " "Please try later.") class L2GatewayAlreadyInUse(n_exc.Conflict): message = _("Gateway Service %(gateway)s is already in use") class InvalidTransportType(NsxPluginException): message = _("The transport type %(transport_type)s is not recognized " "by the backend") class InvalidSecurityCertificate(NsxPluginException): message = _("An invalid security certificate was specified for the " "gateway device. Certificates must be enclosed between " "'-----BEGIN CERTIFICATE-----' and " "'-----END CERTIFICATE-----'") class ServiceOverQuota(n_exc.Conflict): message = _("Quota exceeded for NSX resource %(overs)s: %(err_msg)s") class PortConfigurationError(NsxPluginException): message = _("An error occurred while connecting LSN %(lsn_id)s " "and network %(net_id)s via port %(port_id)s") def __init__(self, **kwargs): super(PortConfigurationError, self).__init__(**kwargs) self.port_id = kwargs.get('port_id') class LogicalRouterNotFound(n_exc.NotFound): message = _('Unable to find logical router for %(entity_id)s') class LsnNotFound(n_exc.NotFound): message = _('Unable to find LSN for %(entity)s %(entity_id)s') class LsnPortNotFound(n_exc.NotFound): message = (_('Unable to find port for LSN %(lsn_id)s ' 'and %(entity)s %(entity_id)s')) class LsnMigrationConflict(n_exc.Conflict): message = _("Unable to migrate network '%(net_id)s' to LSN: %(reason)s") class LsnConfigurationConflict(NsxPluginException): message = _("Configuration conflict on Logical Service Node 
%(lsn_id)s") class DvsNotFound(n_exc.NotFound): message = _('Unable to find DVS %(dvs)s') class NoRouterAvailable(n_exc.ResourceExhausted): message = _("Unable to create the router. " "No tenant router is available for allocation.") class NsxL2GWConnectionMappingNotFound(n_exc.NotFound): message = _('Unable to find mapping for L2 gateway connection: %(conn)s') class NsxL2GWDeviceNotFound(n_exc.NotFound): message = _('Unable to find logical L2 gateway device.') class NsxL2GWInUse(n_exc.InUse): message = _("L2 Gateway '%(gateway_id)s' has been used") class InvalidIPAddress(n_exc.InvalidInput): message = _("'%(ip_address)s' must be a /32 CIDR based IPv4 address") class SecurityGroupMaximumCapacityReached(NsxPluginException): pass class NsxResourceNotFound(n_exc.NotFound): message = _("%(res_name)s %(res_id)s not found on the backend.") class NsxAZResourceNotFound(NsxResourceNotFound): message = _("Availability zone %(res_name)s %(res_id)s not found on the " "backend.") class NsxQosPolicyMappingNotFound(n_exc.NotFound): message = _('Unable to find mapping for QoS policy: %(policy)s') class NumberOfNsgroupCriteriaTagsReached(NsxPluginException): message = _("Port can be associated with at most %(max_num)s " "security-groups.") class NsxTaaSDriverException(NsxPluginException): message = _("Tap-as-a-Service NSX driver exception: %(msg)s.") class NsxPortMirrorSessionMappingNotFound(n_exc.NotFound): message = _("Unable to find mapping for Tap Flow: %(tf)s") class NsxInvalidConfiguration(n_exc.InvalidConfigurationOption): message = _("An invalid value was provided for %(opt_name)s: " "%(opt_value)s: %(reason)s") class NsxBgpSpeakerUnableToAddGatewayNetwork(n_exc.BadRequest): message = _("Unable to add gateway network %(network_id)s to BGP speaker " "%(bgp_speaker_id)s, network must have association with an " "address-scope and can be associated with one BGP speaker at " "most.") class NsxBgpNetworkNotExternal(n_exc.BadRequest): message = _("Network %(net_id)s is not 
external, only external network " "can be associated with a BGP speaker.") class NsxBgpGatewayNetworkHasNoSubnets(n_exc.BadRequest): message = _("Can't associate external network %(net_id)s with BGP " "speaker, network doesn't contain any subnets.") class NsxRouterInterfaceDoesNotMatchAddressScope(n_exc.BadRequest): message = _("Unable to update no-NAT router %(router_id)s, " "only subnets allocated from address-scope " "%(address_scope_id)s can be connected.") class NsxVpnValidationError(NsxPluginException): message = _("Invalid VPN configuration: %(details)s") class NsxIPsecVpnMappingNotFound(n_exc.NotFound): message = _("Unable to find mapping for ipsec site connection: %(conn)s") class NsxENSPortSecurity(n_exc.BadRequest): message = _("Port security is not supported on ENS Transport zones") vmware-nsx-12.0.1/vmware_nsx/common/l3_rpc_agent_api.py0000666000175100017510000000303513244523345023162 0ustar zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. class L3NotifyAPI(object): """Dummy driver for L3 notifcations - no need - no L3 agenets.""" # We need this driver as this code is invoked from the L3 mixin code. 
def agent_updated(self, context, admin_state_up, host): pass def router_deleted(self, context, router_id): pass def routers_updated(self, context, router_ids, operation=None, data=None, shuffle_agents=False, schedule_routers=True): pass def add_arp_entry(self, context, router_id, arp_table, operation=None): pass def del_arp_entry(self, context, router_id, arp_table, operation=None): pass def delete_fipnamespace_for_ext_net(self, context, ext_net_id): pass def router_removed_from_agent(self, context, router_id, host): pass def router_added_to_agent(self, context, router_ids, host): pass def routers_updated_on_host(self, context, router_ids, host): pass vmware-nsx-12.0.1/vmware_nsx/extensions/0000775000175100017510000000000013244524600020316 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/extensions/dhcp_mtu.py0000666000175100017510000000313013244523345022477 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api import extensions from neutron_lib import constants DHCP_MTU = 'dhcp_mtu' EXTENDED_ATTRIBUTES_2_0 = { 'subnets': { DHCP_MTU: { 'allow_post': True, 'allow_put': True, 'default': constants.ATTR_NOT_SPECIFIED, # This is the legal range for the backend MTU 'validate': {'type:range': (68, 65535)}, 'is_visible': True}, } } class Dhcp_mtu(extensions.ExtensionDescriptor): """Extension class supporting DHCP MTU for subnets.""" @classmethod def get_name(cls): return "DHCP MTU" @classmethod def get_alias(cls): return "dhcp-mtu" @classmethod def get_description(cls): return "Enable the ability to add DHCP MTU for Subnets" @classmethod def get_updated(cls): return "2016-7-21T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 return {} vmware-nsx-12.0.1/vmware_nsx/extensions/advancedserviceproviders.py0000666000175100017510000000277613244523345025777 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api import extensions # Attribute Map ADV_SERVICE_PROVIDERS = 'advanced_service_providers' EXTENDED_ATTRIBUTES_2_0 = { 'subnets': { ADV_SERVICE_PROVIDERS: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'default': None}}} class Advancedserviceproviders(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Advanced Service Providers" @classmethod def get_alias(cls): return "advanced-service-providers" @classmethod def get_description(cls): return "Id of the advanced service providers attached to the subnet" @classmethod def get_updated(cls): return "2014-12-11T12:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} vmware-nsx-12.0.1/vmware_nsx/extensions/routertype.py0000666000175100017510000000343113244523345023122 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api import extensions from neutron_lib import constants ROUTER_TYPE = 'router_type' VALID_TYPES = ['shared', 'exclusive'] EXTENDED_ATTRIBUTES_2_0 = { 'routers': { ROUTER_TYPE: {'allow_post': True, 'allow_put': True, 'validate': {'type:values': VALID_TYPES}, 'default': constants.ATTR_NOT_SPECIFIED, 'is_visible': True}, } } class Routertype(extensions.ExtensionDescriptor): """Extension class supporting router type.""" @classmethod def get_name(cls): return "Router Type" @classmethod def get_alias(cls): return "nsxv-router-type" @classmethod def get_description(cls): return "Enables configuration of NSXv router type." @classmethod def get_updated(cls): return "2015-1-12T10:00:00-00:00" def get_required_extensions(self): return ["router"] @classmethod def get_resources(cls): """Returns Ext Resources.""" return [] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} vmware-nsx-12.0.1/vmware_nsx/extensions/vnicindex.py0000666000175100017510000000300113244523345022660 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api import converters from neutron_lib.api import extensions # Attribute Map VNIC_INDEX = 'vnic_index' EXTENDED_ATTRIBUTES_2_0 = { 'ports': { VNIC_INDEX: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'convert_to': converters.convert_to_int_if_not_none}}} class Vnicindex(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "VNIC Index" @classmethod def get_alias(cls): return "vnic-index" @classmethod def get_description(cls): return ("Enable a port to be associated with a VNIC index") @classmethod def get_updated(cls): return "2014-09-15T12:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} vmware-nsx-12.0.1/vmware_nsx/extensions/lsn.py0000666000175100017510000000505313244523345021476 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron.api import extensions from neutron.api.v2 import base from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import directory EXT_ALIAS = 'lsn' COLLECTION_NAME = "%ss" % EXT_ALIAS RESOURCE_ATTRIBUTE_MAP = { COLLECTION_NAME: { 'network': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'is_visible': True}, 'report': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': None}, 'is_visible': True}, }, } class Lsn(api_extensions.ExtensionDescriptor): """Enable LSN configuration for Neutron NSX networks.""" @classmethod def get_name(cls): return "Logical Service Node configuration" @classmethod def get_alias(cls): return EXT_ALIAS @classmethod def get_description(cls): return "Enables configuration of NSX Logical Services Node." @classmethod def get_updated(cls): return "2013-10-05T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] plugin = directory.get_plugin() resource_name = EXT_ALIAS collection_name = resource_name.replace('_', '-') + "s" params = RESOURCE_ATTRIBUTE_MAP.get(COLLECTION_NAME, dict()) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=False) ex = extensions.ResourceExtension(collection_name, controller) exts.append(ex) return exts def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} vmware-nsx-12.0.1/vmware_nsx/extensions/networkgw.py0000666000175100017510000002240213244523345022726 0ustar zuulzuul00000000000000# Copyright 2013 VMware. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc from oslo_config import cfg from neutron.api.v2 import resource_helper from neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib import constants from neutron_lib.db import constants as db_const from vmware_nsx._i18n import _ GATEWAY_RESOURCE_NAME = "network_gateway" DEVICE_RESOURCE_NAME = "gateway_device" # Use dash for alias and collection name EXT_ALIAS = GATEWAY_RESOURCE_NAME.replace('_', '-') NETWORK_GATEWAYS = "%ss" % EXT_ALIAS GATEWAY_DEVICES = "%ss" % DEVICE_RESOURCE_NAME.replace('_', '-') DEVICE_ID_ATTR = 'id' IFACE_NAME_ATTR = 'interface_name' # TODO(salv-orlando): This type definition is duplicated into # openstack/vmware-nsx. This temporary duplication should be removed once the # plugin decomposition is finished. 
# Allowed network types for the NSX Plugin class NetworkTypes(object): """Allowed provider network types for the NSX Plugin.""" L3_EXT = 'l3_ext' STT = 'stt' GRE = 'gre' FLAT = 'flat' VLAN = 'vlan' BRIDGE = 'bridge' # Attribute Map for Network Gateway Resource # TODO(salvatore-orlando): add admin state as other neutron resources RESOURCE_ATTRIBUTE_MAP = { NETWORK_GATEWAYS: { 'id': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': db_const.NAME_FIELD_SIZE}, 'is_visible': True, 'default': ''}, 'default': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'devices': {'allow_post': True, 'allow_put': False, 'validate': {'type:device_list': None}, 'is_visible': True}, 'ports': {'allow_post': False, 'allow_put': False, 'default': [], 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'required_by_policy': True, 'is_visible': True} }, GATEWAY_DEVICES: { 'id': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': db_const.NAME_FIELD_SIZE}, 'is_visible': True, 'default': ''}, 'client_certificate': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True}, 'connector_type': {'allow_post': True, 'allow_put': True, 'validate': {'type:connector_type': None}, 'is_visible': True}, 'connector_ip': {'allow_post': True, 'allow_put': True, 'validate': {'type:ip_address': None}, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'required_by_policy': True, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, } } def _validate_device_list(data, valid_values=None): """Validate the list of service definitions.""" if not data: # Devices must be provided msg = 
_("Cannot create a gateway with an empty device list") return msg try: for device in data: key_specs = {DEVICE_ID_ATTR: {'type:regex': constants.UUID_PATTERN, 'required': True}, IFACE_NAME_ATTR: {'type:string': None, 'required': False}} err_msg = validators.validate_dict( device, key_specs=key_specs) if err_msg: return err_msg unexpected_keys = [key for key in device if key not in key_specs] if unexpected_keys: err_msg = (_("Unexpected keys found in device description:%s") % ",".join(unexpected_keys)) return err_msg except TypeError: return (_("%s: provided data are not iterable") % _validate_device_list.__name__) def _validate_connector_type(data, valid_values=None): if not data: # A connector type is compulsory msg = _("A connector type is required to create a gateway device") return msg connector_types = (valid_values if valid_values else [NetworkTypes.GRE, NetworkTypes.STT, NetworkTypes.BRIDGE, 'ipsec%s' % NetworkTypes.GRE, 'ipsec%s' % NetworkTypes.STT]) if data not in connector_types: msg = _("Unknown connector type: %s") % data return msg nw_gw_quota_opts = [ cfg.IntOpt('quota_network_gateway', default=5, help=_('Number of network gateways allowed per tenant, ' '-1 for unlimited')) ] cfg.CONF.register_opts(nw_gw_quota_opts, 'QUOTAS') validators.add_validator('device_list', _validate_device_list) validators.add_validator('connector_type', _validate_connector_type) class Networkgw(extensions.ExtensionDescriptor): """API extension for Layer-2 Gateway support. The Layer-2 gateway feature allows for connecting neutron networks with external networks at the layer-2 level. No assumption is made on the location of the external network, which might not even be directly reachable from the hosts where the VMs are deployed. This is achieved by instantiating 'network gateways', and then connecting Neutron network to them. 
""" @classmethod def get_name(cls): return "Network Gateway" @classmethod def get_alias(cls): return EXT_ALIAS @classmethod def get_description(cls): return "Connects Neutron networks with external networks at layer 2." @classmethod def get_updated(cls): return "2014-01-01T00:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" member_actions = { GATEWAY_RESOURCE_NAME.replace('_', '-'): { 'connect_network': 'PUT', 'disconnect_network': 'PUT'}} plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, None, action_map=member_actions, register_quota=True, translate_name=True) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} class NetworkGatewayPluginBase(object): @abc.abstractmethod def create_network_gateway(self, context, network_gateway): pass @abc.abstractmethod def update_network_gateway(self, context, id, network_gateway): pass @abc.abstractmethod def get_network_gateway(self, context, id, fields=None): pass @abc.abstractmethod def delete_network_gateway(self, context, id): pass @abc.abstractmethod def get_network_gateways(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def connect_network(self, context, network_gateway_id, network_mapping_info): pass @abc.abstractmethod def disconnect_network(self, context, network_gateway_id, network_mapping_info): pass @abc.abstractmethod def create_gateway_device(self, context, gateway_device): pass @abc.abstractmethod def update_gateway_device(self, context, id, gateway_device): pass @abc.abstractmethod def delete_gateway_device(self, context, id): pass @abc.abstractmethod def get_gateway_device(self, context, id, fields=None): pass @abc.abstractmethod def get_gateway_devices(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, 
page_reverse=False): pass vmware-nsx-12.0.1/vmware_nsx/extensions/providersecuritygroup.py0000666000175100017510000000572513244523345025407 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import converters from neutron_lib.api import extensions from neutron_lib import constants from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ PROVIDER = 'provider' PROVIDER_SECURITYGROUPS = 'provider_security_groups' EXTENDED_ATTRIBUTES_2_0 = { 'security_groups': { PROVIDER: { 'allow_post': True, 'allow_put': False, 'convert_to': converters.convert_to_boolean, 'default': False, 'enforce_policy': True, 'is_visible': True} }, 'ports': {PROVIDER_SECURITYGROUPS: { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'convert_to': converters.convert_none_to_empty_list, 'validate': {'type:uuid_list': None}, 'default': constants.ATTR_NOT_SPECIFIED} } } NUM_PROVIDER_SGS_ON_PORT = 1 class SecurityGroupNotProvider(nexception.InvalidInput): message = _("Security group %(id)s is not a provider security group.") class SecurityGroupIsProvider(nexception.InvalidInput): message = _("Security group %(id)s is a provider security group and " "cannot be specified via the security group field.") class DefaultSecurityGroupIsNotProvider(nexception.InvalidInput): message = _("Can't create default security-group as a provider " "security-group.") class 
ProviderSecurityGroupDeleteNotAdmin(nexception.NotAuthorized): message = _("Security group %(id)s is a provider security group and " "requires an admin to delete it.") class Providersecuritygroup(extensions.ExtensionDescriptor): """Provider security-group extension.""" @classmethod def get_name(cls): return "Provider security group" @classmethod def get_alias(cls): return "provider-security-group" @classmethod def get_description(cls): return "Admin controlled security groups with blocking rules." @classmethod def get_updated(cls): return "2016-07-13T10:00:00-00:00" def get_required_extensions(self): return ["security-group"] @classmethod def get_resources(cls): """Returns Ext Resources.""" return [] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} vmware-nsx-12.0.1/vmware_nsx/extensions/qos_queue.py0000666000175100017510000001760613244523345022717 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import abc from neutron.api import extensions from neutron.api.v2 import base from neutron_lib.api import converters from neutron_lib.api import extensions as api_extensions from neutron_lib.db import constants as db_const from neutron_lib import exceptions as nexception from neutron_lib.plugins import directory from vmware_nsx._i18n import _ # For policy.json/Auth qos_queue_create = "create_qos_queue" qos_queue_delete = "delete_qos_queue" qos_queue_get = "get_qos_queue" qos_queue_list = "get_qos_queues" class DefaultQueueCreateNotAdmin(nexception.InUse): message = _("Need to be admin in order to create queue called default") class DefaultQueueAlreadyExists(nexception.InUse): message = _("Default queue already exists.") class QueueInvalidDscp(nexception.InvalidInput): message = _("Invalid value for dscp %(data)s must be integer value" " between 0 and 63.") class QueueInvalidMarking(nexception.InvalidInput): message = _("The qos marking cannot be set to 'trusted' " "when the DSCP field is set") class QueueMinGreaterMax(nexception.InvalidInput): message = _("Invalid bandwidth rate, min greater than max.") class QueueInvalidBandwidth(nexception.InvalidInput): message = _("Invalid bandwidth rate, %(data)s must be a non negative" " integer.") class QueueNotFound(nexception.NotFound): message = _("Queue %(id)s does not exist") class QueueInUseByPort(nexception.InUse): message = _("Unable to delete queue attached to port.") class QueuePortBindingNotFound(nexception.NotFound): message = _("Port is not associated with lqueue") def convert_to_unsigned_int_or_none(val): if val is None: return try: val = int(val) if val < 0: raise ValueError() except (ValueError, TypeError): msg = _("'%s' must be a non negative integer.") % val raise nexception.InvalidInput(error_message=msg) return val def convert_to_unsigned_int_or_none_max_63(val): val = convert_to_unsigned_int_or_none(val) if val > 63: raise QueueInvalidDscp(data=val) return val # As per NSX API, if a queue is trusted, 
DSCP must be omitted; if a queue is # untrusted, DSCP must be specified. Whichever default values we choose for # the tuple (qos_marking, dscp), there will be at least one combination of a # request with conflicting values: for instance given the default values below, # requests with qos_marking = 'trusted' and the default dscp value will fail. # In order to avoid API users to explicitly specify a setting for clearing # the DSCP field when a trusted queue is created, the code serving this API # will adopt the following behaviour when qos_marking is set to 'trusted': # - if the DSCP attribute is set to the default value (0), silently drop # its value # - if the DSCP attribute is set to anything than 0 (but still a valid DSCP # value) return a 400 error as qos_marking and DSCP setting conflict. # TODO(salv-orlando): Evaluate whether it will be possible from a backward # compatibility perspective to change the default value for DSCP in order to # avoid this peculiar behaviour RESOURCE_ATTRIBUTE_MAP = { 'qos_queues': { 'id': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'default': {'allow_post': True, 'allow_put': False, 'convert_to': converters.convert_to_boolean, 'is_visible': True, 'default': False}, 'name': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': db_const.NAME_FIELD_SIZE}, 'is_visible': True, 'default': ''}, 'min': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'max': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'convert_to': convert_to_unsigned_int_or_none}, 'qos_marking': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': ['untrusted', 'trusted']}, 'default': 'untrusted', 'is_visible': True}, 'dscp': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none_max_63}, 'tenant_id': {'allow_post': True, 'allow_put': False, 
'required_by_policy': True, 'validate': { 'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'is_visible': True}, }, } QUEUE = 'queue_id' RXTX_FACTOR = 'rxtx_factor' EXTENDED_ATTRIBUTES_2_0 = { 'ports': { RXTX_FACTOR: {'allow_post': True, # FIXME(arosen): the plugin currently does not # implement updating rxtx factor on port. 'allow_put': True, 'is_visible': False, 'default': 1, 'enforce_policy': True, 'convert_to': converters.convert_to_positive_float_or_none}, QUEUE: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'default': False, 'enforce_policy': True}}, 'networks': {QUEUE: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': False, 'enforce_policy': True}} } class Qos_queue(api_extensions.ExtensionDescriptor): """Port Queue extension.""" @classmethod def get_name(cls): return "QoS Queue" @classmethod def get_alias(cls): return "qos-queue" @classmethod def get_description(cls): return "NSX QoS extension." @classmethod def get_updated(cls): return "2014-01-01T00:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] plugin = directory.get_plugin() resource_name = 'qos_queue' collection_name = resource_name.replace('_', '-') + "s" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict()) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=False) ex = extensions.ResourceExtension(collection_name, controller) exts.append(ex) return exts def get_extended_resources(self, version): if version == "2.0": return dict(list(EXTENDED_ATTRIBUTES_2_0.items()) + list(RESOURCE_ATTRIBUTE_MAP.items())) else: return {} class QueuePluginBase(object): @abc.abstractmethod def create_qos_queue(self, context, queue): pass @abc.abstractmethod def delete_qos_queue(self, context, id): pass @abc.abstractmethod def get_qos_queue(self, context, id, fields=None): pass @abc.abstractmethod def get_qos_queues(self, context, filters=None, fields=None, sorts=None, limit=None, 
marker=None, page_reverse=False): pass vmware-nsx-12.0.1/vmware_nsx/extensions/securitygrouplogging.py0000666000175100017510000000363613244523345025202 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import converters from neutron_lib.api import extensions LOGGING = 'logging' RESOURCE_ATTRIBUTE_MAP = { 'security_groups': { LOGGING: { 'allow_post': True, 'allow_put': True, 'convert_to': converters.convert_to_boolean, 'default': False, 'enforce_policy': True, 'is_visible': True} } } class Securitygrouplogging(extensions.ExtensionDescriptor): """Security group logging extension.""" @classmethod def get_name(cls): return "Security group logging" @classmethod def get_alias(cls): return "security-group-logging" @classmethod def get_description(cls): return "Security group logging extension." 
@classmethod def get_namespace(cls): # todo return "https://docs.openstack.org/ext/security_group_logging/api/v2.0" @classmethod def get_updated(cls): return "2015-04-13T10:00:00-00:00" def get_required_extensions(self): return ["security-group"] @classmethod def get_resources(cls): """Returns Ext Resources.""" return [] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} vmware-nsx-12.0.1/vmware_nsx/extensions/__init__.py0000666000175100017510000000000013244523345022424 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/extensions/maclearning.py0000666000175100017510000000334413244523345023163 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import converters from neutron_lib.api import extensions from neutron_lib import constants MAC_LEARNING = 'mac_learning_enabled' EXTENDED_ATTRIBUTES_2_0 = { 'ports': { MAC_LEARNING: {'allow_post': True, 'allow_put': True, 'convert_to': converters.convert_to_boolean, 'default': constants.ATTR_NOT_SPECIFIED, 'is_visible': True}, } } class Maclearning(extensions.ExtensionDescriptor): """Extension class supporting port mac learning.""" @classmethod def get_name(cls): return "MAC Learning" @classmethod def get_alias(cls): return "mac-learning" @classmethod def get_description(cls): return "Provides MAC learning capabilities." 
@classmethod def get_updated(cls): return "2013-05-1T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" return [] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} vmware-nsx-12.0.1/vmware_nsx/extensions/edge_service_gateway_bgp_peer.py0000666000175100017510000000651313244523345026714 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ EDGE_SERVICE_GW = 'esg_id' EDGE_ID_MAX_LEN = 15 ESG_BGP_PEER_EXT_ALIAS = 'edge-service-gateway-bgp-peer' def _validate_edge_service_gw_id(esg_id, valid_values=None): if esg_id is None: return msg = validators.validate_string(esg_id, max_len=EDGE_ID_MAX_LEN) if msg: return msg if re.match(r'^edge-[1-9]+[0-9]*$', esg_id) is None: msg = _("'%s' is not a valid edge service gateway id.") % esg_id return msg validators.add_validator('validate_edge_service_gw_id', _validate_edge_service_gw_id) RESOURCE_ATTRIBUTE_MAP = { 'bgp-peers': { EDGE_SERVICE_GW: { 'allow_post': True, 'allow_put': False, 'default': None, 'validate': {'type:validate_edge_service_gw_id': None}, 'enforce_policy': True, 'is_visible': True, 'required_by_policy': False } } } class BgpDisabledOnEsgPeer(nexception.InvalidInput): message = _("To add this peer to 
BGP speaker you must first enable BGP on " "the associated ESG - '%(esg_id)s'.") class EsgRemoteASDoNotMatch(nexception.InvalidInput): message = _("Specified remote AS is '%(remote_as)s', but ESG '%(esg_id)s' " "is configured on AS %(esg_as)s.") class ExternalSubnetHasGW(nexception.InvalidInput): message = _("Subnet '%(subnet_id)s' on external network '%(network_id)s' " "is configured with gateway IP, set to None before enabling " "BGP on the network.") class EsgInternalIfaceDoesNotMatch(nexception.InvalidInput): message = _("Given BGP peer IP address doesn't match " "any interface on ESG '%(esg_id)s'") class Edge_service_gateway_bgp_peer(extensions.ExtensionDescriptor): """Extension class to allow identifying of-peer with specificN SXv edge service gateway. """ @classmethod def get_name(cls): return "Edge service gateway bgp peer" @classmethod def get_alias(cls): return ESG_BGP_PEER_EXT_ALIAS @classmethod def get_description(cls): return ("Adding a new (optional) attribute 'esg_id' to bgp-peer " "resource, where esg_id is a valid NSXv Edge service gateway " "id.") @classmethod def get_updated(cls): return "2017-04-01T10:00:00-00:00" def get_required_extensions(self): return ["bgp"] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} vmware-nsx-12.0.1/vmware_nsx/extensions/dns_search_domain.py0000666000175100017510000000646413244523345024351 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import re from neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib import constants from neutron_lib.db import constants as db_const from vmware_nsx._i18n import _ DNS_LABEL_MAX_LEN = 63 DNS_LABEL_REGEX = "[a-zA-Z0-9-]{1,%d}$" % DNS_LABEL_MAX_LEN def _validate_dns_format(data): if not data: return try: # Allow values ending in period '.' trimmed = data if not data.endswith('.') else data[:-1] names = trimmed.split('.') for name in names: if not name: raise TypeError(_("Encountered an empty component")) if name.endswith('-') or name[0] == '-': raise TypeError( _("Name '%s' must not start or end with a hyphen") % name) if not re.match(DNS_LABEL_REGEX, name): raise TypeError( _("Name '%s' must be 1-63 characters long, each of " "which can only be alphanumeric or a hyphen") % name) # RFC 1123 hints that a TLD can't be all numeric. last is a TLD if # it's an FQDN. if len(names) > 1 and re.match("^[0-9]+$", names[-1]): raise TypeError(_("TLD '%s' must not be all numeric") % names[-1]) except TypeError as e: msg = _("'%(data)s' not a valid DNS search domain. 
Reason: " "%(reason)s") % {'data': data, 'reason': str(e)} return msg def _validate_dns_search_domain(data, max_len=db_const.NAME_FIELD_SIZE): msg = validators.validate_string(data, max_len) if msg: return msg if not data: return msg = _validate_dns_format(data) if msg: return msg validators.add_validator('dns_search_domain', _validate_dns_search_domain) DNS_SEARCH_DOMAIN = 'dns_search_domain' EXTENDED_ATTRIBUTES_2_0 = { 'subnets': { DNS_SEARCH_DOMAIN: { 'allow_post': True, 'allow_put': True, 'default': constants.ATTR_NOT_SPECIFIED, 'validate': {'type:dns_search_domain': db_const.NAME_FIELD_SIZE}, 'is_visible': True}, } } class Dns_search_domain(extensions.ExtensionDescriptor): """Extension class supporting dns search domains for subnets.""" @classmethod def get_name(cls): return "DNS search Domains" @classmethod def get_alias(cls): return "dns-search-domain" @classmethod def get_description(cls): return "Enable the ability to add DNS search domain name for Subnets" @classmethod def get_updated(cls): return "2016-1-22T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 return {} vmware-nsx-12.0.1/vmware_nsx/extensions/api_replay.py0000666000175100017510000000477513244523345023041 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib.api import extensions from neutron_lib.db import constants as db_const # The attributes map is here for 2 reasons: # 1) allow posting id for the different objects we are importing # 2) make sure security-group named 'default' is also copied ID_WITH_POST = {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True} RESOURCE_ATTRIBUTE_MAP = { 'ports': { 'id': ID_WITH_POST, }, 'networks': { 'id': ID_WITH_POST, }, 'security_groups': { 'id': ID_WITH_POST, 'name': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {'type:string': db_const.NAME_FIELD_SIZE}}, }, 'security_group_rules': { 'id': ID_WITH_POST, }, 'routers': { 'id': ID_WITH_POST, }, 'policies': { # QoS policies 'id': ID_WITH_POST, }, } class Api_replay(extensions.ExtensionDescriptor): """Extension for api replay which allows us to specify ids of resources.""" @classmethod def get_name(cls): return "Api Replay" @classmethod def get_alias(cls): return 'api-replay' @classmethod def get_description(cls): return "Enables mode to allow api to be replayed" @classmethod def get_updated(cls): return "2016-05-05T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} def get_required_extensions(self): # make sure this extension is called after those, so our change # will not be overridden return ["security-group", "router"] def get_optional_extensions(self): # QoS is optional since it is not always enabled return ["qos"] vmware-nsx-12.0.1/vmware_nsx/extensions/secgroup_rule_local_ip_prefix.py0000666000175100017510000000366013244523345027001 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.extensions import securitygroup from neutron_lib.api import extensions from neutron_lib import constants LOCAL_IP_PREFIX = 'local_ip_prefix' RESOURCE_ATTRIBUTE_MAP = { 'security_group_rules': { LOCAL_IP_PREFIX: { 'allow_post': True, 'allow_put': False, 'convert_to': securitygroup.convert_ip_prefix_to_cidr, 'default': constants.ATTR_NOT_SPECIFIED, 'enforce_policy': True, 'is_visible': True} } } class Secgroup_rule_local_ip_prefix(extensions.ExtensionDescriptor): """Extension class to add support for specifying local-ip-prefix in a security-group rule. """ @classmethod def get_name(cls): return "Security Group rule local ip prefix" @classmethod def get_alias(cls): return "secgroup-rule-local-ip-prefix" @classmethod def get_description(cls): return ("Enable to specify the 'local-ip-prefix' when creating a " "security-group rule.") @classmethod def get_updated(cls): return "2016-03-01T10:00:00-00:00" def get_required_extensions(self): return ["security-group"] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} vmware-nsx-12.0.1/vmware_nsx/extensions/securitygrouppolicy.py0000666000175100017510000000367113244523345025052 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ POLICY = 'policy' RESOURCE_ATTRIBUTE_MAP = { 'security_groups': { POLICY: { 'allow_post': True, 'allow_put': True, 'enforce_policy': True, 'is_visible': True, 'default': None} } } class PolicySecurityGroupDeleteNotAdmin(nexception.NotAuthorized): message = _("Security group %(id)s is a policy security group and " "requires an admin to delete it.") class Securitygrouppolicy(extensions.ExtensionDescriptor): """Security group policy extension.""" @classmethod def get_name(cls): return "Security group policy" @classmethod def get_alias(cls): return "security-group-policy" @classmethod def get_description(cls): return "Security group policy extension." @classmethod def get_updated(cls): return "2016-10-06T10:00:00-00:00" def get_required_extensions(self): return ["security-group"] @classmethod def get_resources(cls): """Returns Ext Resources.""" return [] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} vmware-nsx-12.0.1/vmware_nsx/extensions/routersize.py0000666000175100017510000000327513244523345023121 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions from neutron_lib import constants ROUTER_SIZE = 'router_size' VALID_EDGE_SIZES = ['compact', 'large', 'xlarge', 'quadlarge'] EXTENDED_ATTRIBUTES_2_0 = { 'routers': { ROUTER_SIZE: {'allow_post': True, 'allow_put': True, 'validate': {'type:values': VALID_EDGE_SIZES}, 'default': constants.ATTR_NOT_SPECIFIED, 'is_visible': True}, } } class Routersize(extensions.ExtensionDescriptor): """Extension class supporting router size.""" @classmethod def get_name(cls): return "Router Size" @classmethod def get_alias(cls): return "nsxv-router-size" @classmethod def get_description(cls): return "Enables configuration of NSXv Edge Size" @classmethod def get_updated(cls): return "2015-9-22T10:00:00-00:00" def get_required_extensions(self): return ["router"] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 return {} vmware-nsx-12.0.1/vmware_nsx/extensions/nsxpolicy.py0000666000175100017510000000635513244523345022740 0ustar zuulzuul00000000000000# Copyright 2016 VMware. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # import abc from neutron.api.v2 import resource_helper from neutron_lib.api import extensions from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ POLICY_RESOURCE_NAME = "nsx_policy" # Use dash for alias and collection name EXT_ALIAS = POLICY_RESOURCE_NAME.replace('_', '-') NSX_POLICIES = "nsx_policies" # The nsx-policies table is read only RESOURCE_ATTRIBUTE_MAP = { NSX_POLICIES: { 'id': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, 'name': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, 'description': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, } } class Nsxpolicy(extensions.ExtensionDescriptor): """API extension for NSX policies.""" @classmethod def get_name(cls): return "NSX Policy" @classmethod def get_alias(cls): return EXT_ALIAS @classmethod def get_description(cls): return "NSX security policies." @classmethod def get_updated(cls): return "2016-11-20T00:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) member_actions = {} return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, None, action_map=member_actions, register_quota=True, translate_name=True) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} class NsxPolicyReadOnly(nexception.NotAuthorized): message = _("NSX policies are read-only.") class NsxPolicyPluginBase(object): @abc.abstractmethod def create_nsx_policy(self, context, nsx_policy): raise NsxPolicyReadOnly() @abc.abstractmethod def update_nsx_policy(self, context, id, nsx_policy): raise NsxPolicyReadOnly() @abc.abstractmethod def get_nsx_policy(self, context, id, fields=None): pass @abc.abstractmethod def delete_nsx_policy(self, context, id): raise 
NsxPolicyReadOnly() @abc.abstractmethod def get_nsx_policies(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass vmware-nsx-12.0.1/vmware_nsx/extensions/housekeeper.py0000666000175100017510000000632313244523345023222 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron.api.v2 import resource_helper from neutron_lib.api import extensions from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ HOUSEKEEPER_RESOURCE_NAME = "housekeeper" HOUSEKEEPERS = "housekeepers" # The housekeeper tasks table is read only RESOURCE_ATTRIBUTE_MAP = { HOUSEKEEPERS: { 'name': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, 'description': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, 'enabled': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, } } class Housekeeper(extensions.ExtensionDescriptor): """API extension for NSX housekeeper jobs.""" @classmethod def get_name(cls): return "Housekeeper" @classmethod def get_alias(cls): return HOUSEKEEPER_RESOURCE_NAME @classmethod def get_description(cls): return "NSX plugin housekeeping services." 
@classmethod def get_updated(cls): return "2016-11-20T00:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) member_actions = {} return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, None, action_map=member_actions, register_quota=True, translate_name=True) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} class HousekeeperReadOnly(nexception.NotAuthorized): message = _("NSX housekeeper tasks are read-only.") class HousekeeperPluginBase(object): @abc.abstractmethod def create_housekeeper(self, context, housekeeper): raise HousekeeperReadOnly() @abc.abstractmethod def update_housekeeper(self, context, name, housekeeper): pass @abc.abstractmethod def get_housekeeper(self, context, name, fields=None): pass @abc.abstractmethod def delete_housekeeper(self, context, name): raise HousekeeperReadOnly() @abc.abstractmethod def get_housekeepers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass vmware-nsx-12.0.1/vmware_nsx/extensions/projectpluginmap.py0000666000175100017510000001111413244523345024260 0ustar zuulzuul00000000000000# Copyright 2017 VMware. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import abc from neutron.api.v2 import resource_helper from neutron_lib.api import extensions from neutron_lib.db import constants as db_const from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ PROJECT_PLUGIN_RESOURCE_NAME = "project_plugin_map" # Use dash for alias and collection name EXT_ALIAS = PROJECT_PLUGIN_RESOURCE_NAME.replace('_', '-') PROJECT_PLUGINS = "project_plugin_maps" class NsxPlugins(object): NSX_V = 'nsx-v' NSX_T = 'nsx-t' DVS = 'dvs' VALID_TYPES = [NsxPlugins.NSX_V, NsxPlugins.NSX_T, NsxPlugins.DVS] RESOURCE_ATTRIBUTE_MAP = { PROJECT_PLUGINS: { 'id': { 'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, # project is the id of the project mapped by this entry 'project': { 'allow_post': True, 'allow_put': False, 'is_visible': True}, 'plugin': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:values': VALID_TYPES}}, # tenant id is the id of tenant/project owning this entry 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': { 'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'required_by_policy': True, 'is_visible': True}, } } class Projectpluginmap(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Project Plugin Mapping" @classmethod def get_alias(cls): return EXT_ALIAS @classmethod def get_description(cls): return "Per Project Core Plugin." 
@classmethod def get_updated(cls): return "2017-12-05T00:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) member_actions = {} return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, None, action_map=member_actions, register_quota=True, translate_name=True) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} class ProjectPluginReadOnly(nexception.NotAuthorized): message = _("Project Plugin map entries cannot be modified.") class ProjectPluginAlreadyExists(nexception.Conflict): message = _("Project Plugin map already exists for project " "%(project_id)s.") class ProjectPluginAdminOnly(nexception.NotAuthorized): message = _("Project Plugin map can be added only by an admin user.") class ProjectPluginIllegalId(nexception.Conflict): message = _("Project ID %(project_id)s is illegal.") class ProjectPluginNotAvailable(nexception.NotAuthorized): message = _("Plugin %(plugin)s is not available.") class ProjectPluginMapPluginBase(object): @abc.abstractmethod def create_project_plugin_map(self, context, project_plugin_map): pass @abc.abstractmethod def update_project_plugin_map(self, context, id, project_plugin_map): raise ProjectPluginReadOnly() @abc.abstractmethod def get_project_plugin_map(self, context, id, fields=None): pass @abc.abstractmethod def delete_project_plugin_map(self, context, id): # TODO(asarfaty): delete when the project is deleted? 
raise ProjectPluginReadOnly() @abc.abstractmethod def get_project_plugin_maps(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass vmware-nsx-12.0.1/vmware_nsx/api_client/0000775000175100017510000000000013244524600020226 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/api_client/exception.py0000666000175100017510000000614313244523345022611 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from vmware_nsx._i18n import _ class NsxApiException(Exception): """Base NSX API Client Exception. To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. 
""" message = _("An unknown exception occurred.") def __init__(self, **kwargs): try: self._error_string = self.message % kwargs except Exception: # at least get the core message out if something happened self._error_string = self.message def __str__(self): return self._error_string class UnAuthorizedRequest(NsxApiException): message = _("Server denied session's authentication credentials.") class ResourceNotFound(NsxApiException): message = _("An entity referenced in the request was not found.") class Conflict(NsxApiException): message = _("Request conflicts with configuration on a different " "entity.") class ServiceUnavailable(NsxApiException): message = _("Request could not completed because the associated " "resource could not be reached.") class Forbidden(NsxApiException): message = _("The request is forbidden from accessing the " "referenced resource.") class ReadOnlyMode(Forbidden): message = _("Create/Update actions are forbidden when in read-only mode.") class RequestTimeout(NsxApiException): message = _("The request has timed out.") class BadRequest(NsxApiException): message = _("The server is unable to fulfill the request due " "to a bad syntax") class InvalidSecurityCertificate(BadRequest): message = _("The backend received an invalid security certificate.") def fourZeroZero(response=None): if response and "Invalid SecurityCertificate" in response.body: raise InvalidSecurityCertificate() raise BadRequest() def fourZeroFour(response=None): raise ResourceNotFound() def fourZeroNine(response=None): raise Conflict() def fiveZeroThree(response=None): raise ServiceUnavailable() def fourZeroThree(response=None): if 'read-only' in response.body: raise ReadOnlyMode() else: raise Forbidden() def zero(self, response=None): raise NsxApiException() ERROR_MAPPINGS = { 400: fourZeroZero, 404: fourZeroFour, 405: zero, 409: fourZeroNine, 503: fiveZeroThree, 403: fourZeroThree, 301: zero, 307: zero, 500: zero, 501: zero } 
vmware-nsx-12.0.1/vmware_nsx/api_client/eventlet_client.py0000666000175100017510000001465213244523345024003 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import time import eventlet eventlet.monkey_patch() from oslo_log import log as logging from vmware_nsx.api_client import base from vmware_nsx.api_client import eventlet_request LOG = logging.getLogger(__name__) class EventletApiClient(base.ApiClientBase): """Eventlet-based implementation of NSX ApiClient ABC.""" def __init__(self, api_providers, user, password, concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS, gen_timeout=base.GENERATION_ID_TIMEOUT, use_https=True, connect_timeout=base.DEFAULT_CONNECT_TIMEOUT): '''Constructor :param api_providers: a list of tuples of the form: (host, port, is_ssl). :param user: login username. :param password: login password. :param concurrent_connections: total number of concurrent connections. :param use_https: whether or not to use https for requests. :param connect_timeout: connection timeout in seconds. 
:param gen_timeout controls how long the generation id is kept if set to -1 the generation id is never timed out ''' if not api_providers: api_providers = [] self._api_providers = set([tuple(p) for p in api_providers]) self._api_provider_data = {} # tuple(semaphore, session_cookie) for p in self._api_providers: self._set_provider_data(p, (eventlet.semaphore.Semaphore(1), None)) self._user = user self._password = password self._concurrent_connections = concurrent_connections self._use_https = use_https self._connect_timeout = connect_timeout self._config_gen = None self._config_gen_ts = None self._gen_timeout = gen_timeout # Connection pool is a list of queues. self._conn_pool = eventlet.queue.PriorityQueue() self._next_conn_priority = 1 for __ in range(concurrent_connections): for host, port, is_ssl in api_providers: conn = self._create_connection(host, port, is_ssl) self._conn_pool.put((self._next_conn_priority, conn)) self._next_conn_priority += 1 def acquire_redirect_connection(self, conn_params, auto_login=True, headers=None): """Check out or create connection to redirected NSX API server. Args: conn_params: tuple specifying target of redirect, see self._conn_params() auto_login: returned connection should have valid session cookie headers: headers to pass on if auto_login Returns: An available HTTPConnection instance corresponding to the specified conn_params. If a connection did not previously exist, new connections are created with the highest prioity in the connection pool and one of these new connections returned. """ result_conn = None data = self._get_provider_data(conn_params) if data: # redirect target already exists in provider data and connections # to the provider have been added to the connection pool. Try to # obtain a connection from the pool, note that it's possible that # all connection to the provider are currently in use. 
conns = [] while not self._conn_pool.empty(): priority, conn = self._conn_pool.get_nowait() if not result_conn and self._conn_params(conn) == conn_params: conn.priority = priority result_conn = conn else: conns.append((priority, conn)) for priority, conn in conns: self._conn_pool.put((priority, conn)) # hack: if no free connections available, create new connection # and stash "no_release" attribute (so that we only exceed # self._concurrent_connections temporarily) if not result_conn: conn = self._create_connection(*conn_params) conn.priority = 0 # redirect connections have highest priority conn.no_release = True result_conn = conn else: #redirect target not already known, setup provider lists self._api_providers.update([conn_params]) self._set_provider_data(conn_params, (eventlet.semaphore.Semaphore(1), None)) # redirects occur during cluster upgrades, i.e. results to old # redirects to new, so give redirect targets highest priority priority = 0 for i in range(self._concurrent_connections): conn = self._create_connection(*conn_params) conn.priority = priority if i == self._concurrent_connections - 1: break self._conn_pool.put((priority, conn)) result_conn = conn if result_conn: result_conn.last_used = time.time() if auto_login and self.auth_cookie(conn) is None: self._wait_for_login(result_conn, headers) return result_conn def _login(self, conn=None, headers=None): '''Issue login request and update authentication cookie.''' cookie = None g = eventlet_request.LoginRequestEventlet( self, self._user, self._password, conn, headers) g.start() ret = g.join() if ret: if isinstance(ret, Exception): LOG.error('Login error "%s"', ret) raise ret cookie = ret.getheader("Set-Cookie") if cookie: LOG.debug("Saving new authentication cookie '%s'", cookie) return cookie # Register as subclass. 
base.ApiClientBase.register(EventletApiClient) vmware-nsx-12.0.1/vmware_nsx/api_client/version.py0000666000175100017510000000263413244523345022301 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging LOG = logging.getLogger(__name__) def find_version(headers): """Retrieve NSX controller version from response headers.""" for (header_name, header_value) in (headers or ()): try: if header_name == 'server': return Version(header_value.split('/')[1]) except IndexError: LOG.warning("Unable to fetch NSX version from response " "headers :%s", headers) class Version(object): """Abstracts NSX version by exposing major and minor.""" def __init__(self, version): self.full_version = version.split('.') self.major = int(self.full_version[0]) self.minor = int(self.full_version[1]) def __str__(self): return '.'.join(self.full_version) vmware-nsx-12.0.1/vmware_nsx/api_client/request.py0000666000175100017510000002674113244523345022311 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc import copy import socket import time import eventlet from oslo_log import log as logging from oslo_utils import excutils import six from six.moves import http_client as httplib import six.moves.urllib.parse as urlparse from vmware_nsx._i18n import _ from vmware_nsx import api_client LOG = logging.getLogger(__name__) DEFAULT_HTTP_TIMEOUT = 30 DEFAULT_RETRIES = 2 DEFAULT_REDIRECTS = 2 DEFAULT_API_REQUEST_POOL_SIZE = 1000 DEFAULT_MAXIMUM_REQUEST_ID = 4294967295 DOWNLOAD_TIMEOUT = 180 @six.add_metaclass(abc.ABCMeta) class ApiRequest(object): '''An abstract baseclass for all ApiRequest implementations. This defines the interface and property structure for both eventlet and gevent-based ApiRequest classes. ''' # List of allowed status codes. 
ALLOWED_STATUS_CODES = [ httplib.OK, httplib.CREATED, httplib.NO_CONTENT, httplib.MOVED_PERMANENTLY, httplib.TEMPORARY_REDIRECT, httplib.BAD_REQUEST, httplib.UNAUTHORIZED, httplib.FORBIDDEN, httplib.NOT_FOUND, httplib.CONFLICT, httplib.INTERNAL_SERVER_ERROR, httplib.SERVICE_UNAVAILABLE ] @abc.abstractmethod def start(self): pass @abc.abstractmethod def join(self): pass @abc.abstractmethod def copy(self): pass def _issue_request(self): '''Issue a request to a provider.''' conn = (self._client_conn or self._api_client.acquire_connection(True, copy.copy(self._headers), rid=self._rid())) if conn is None: error = Exception(_("No API connections available")) self._request_error = error return error url = self._url LOG.debug("[%(rid)d] Issuing - request url: %(conn)s " "body: %(body)s", {'rid': self._rid(), 'conn': self._request_str(conn, url), 'body': self._body}) issued_time = time.time() is_conn_error = False is_conn_service_unavail = False response = None try: redirects = 0 while (redirects <= self._redirects): # Update connection with user specified request timeout, # the connect timeout is usually smaller so we only set # the request timeout after a connection is established if conn.sock is None: conn.connect() conn.sock.settimeout(self._http_timeout) elif conn.sock.gettimeout() != self._http_timeout: conn.sock.settimeout(self._http_timeout) headers = copy.copy(self._headers) cookie = self._api_client.auth_cookie(conn) if cookie: headers["Cookie"] = cookie gen = self._api_client.config_gen if gen: headers["X-Nvp-Wait-For-Config-Generation"] = gen LOG.debug("Setting X-Nvp-Wait-For-Config-Generation " "request header: '%s'", gen) try: conn.request(self._method, url, self._body, headers) except Exception as e: with excutils.save_and_reraise_exception(): LOG.warning("[%(rid)d] Exception issuing request: " "%(e)s", {'rid': self._rid(), 'e': e}) response = conn.getresponse() response.body = response.read() response.headers = response.getheaders() elapsed_time = 
time.time() - issued_time LOG.debug("[%(rid)d] Completed request '%(conn)s': " "%(status)s (%(elapsed)s seconds)", {'rid': self._rid(), 'conn': self._request_str(conn, url), 'status': response.status, 'elapsed': elapsed_time}) new_gen = response.getheader('X-Nvp-Config-Generation', None) if new_gen: LOG.debug("Reading X-Nvp-config-Generation response " "header: '%s'", new_gen) if (self._api_client.config_gen is None or self._api_client.config_gen < int(new_gen)): self._api_client.config_gen = int(new_gen) if response.status == httplib.UNAUTHORIZED: # If request is unauthorized, clear the session cookie # for the current provider so that subsequent requests # to the same provider triggers re-authentication. self._api_client.set_auth_cookie(conn, None) elif response.status == httplib.SERVICE_UNAVAILABLE: is_conn_service_unavail = True if response.status not in [httplib.MOVED_PERMANENTLY, httplib.TEMPORARY_REDIRECT]: break elif redirects >= self._redirects: LOG.info("[%d] Maximum redirects exceeded, aborting " "request", self._rid()) break redirects += 1 conn, url = self._redirect_params(conn, response.headers, self._client_conn is None) if url is None: response.status = httplib.INTERNAL_SERVER_ERROR break LOG.info("[%(rid)d] Redirecting request to: %(conn)s", {'rid': self._rid(), 'conn': self._request_str(conn, url)}) # yield here, just in case we are not out of the loop yet eventlet.greenthread.sleep(0) # If we receive any of these responses, then # our server did not process our request and may be in an # errored state. Raise an exception, which will cause the # conn to be released with is_conn_error == True # which puts the conn on the back of the client's priority # queue. 
if (response.status == httplib.INTERNAL_SERVER_ERROR and response.status > httplib.NOT_IMPLEMENTED): LOG.warning("[%(rid)d] Request '%(method)s %(url)s' " "received: %(status)s", {'rid': self._rid(), 'method': self._method, 'url': self._url, 'status': response.status}) raise Exception(_('Server error return: %s'), response.status) return response except socket.error: is_conn_service_unavail = True except Exception as e: if isinstance(e, httplib.BadStatusLine): msg = (_("Invalid server response")) else: msg = str(e) if response is None: elapsed_time = time.time() - issued_time LOG.warning("[%(rid)d] Failed request '%(conn)s': '%(msg)s' " "(%(elapsed)s seconds)", {'rid': self._rid(), 'conn': self._request_str(conn, url), 'msg': msg, 'elapsed': elapsed_time}) self._request_error = e is_conn_error = True return e finally: # Make sure we release the original connection provided by the # acquire_connection() call above. if self._client_conn is None: self._api_client.release_connection(conn, is_conn_error, is_conn_service_unavail, rid=self._rid()) def _redirect_params(self, conn, headers, allow_release_conn=False): """Process redirect response, create new connection if necessary. Args: conn: connection that returned the redirect response headers: response headers of the redirect response allow_release_conn: if redirecting to a different server, release existing connection back to connection pool. Returns: Return tuple(conn, url) where conn is a connection object to the redirect target and url is the path of the API request """ url = None for name, value in headers: if name.lower() == "location": url = value break if not url: LOG.warning("[%d] Received redirect status without location " "header field", self._rid()) return (conn, None) # Accept location with the following format: # 1. /path, redirect to same node # 2. scheme://hostname:[port]/path where scheme is https or http # Reject others # 3. e.g. 
relative paths, unsupported scheme, unspecified host result = urlparse.urlparse(url) if not result.scheme and not result.hostname and result.path: if result.path[0] == "/": if result.query: url = "%s?%s" % (result.path, result.query) else: url = result.path return (conn, url) # case 1 else: LOG.warning("[%(rid)d] Received invalid redirect " "location: '%(url)s'", {'rid': self._rid(), 'url': url}) return (conn, None) # case 3 elif result.scheme not in ["http", "https"] or not result.hostname: LOG.warning("[%(rid)d] Received malformed redirect " "location: %(url)s", {'rid': self._rid(), 'url': url}) return (conn, None) # case 3 # case 2, redirect location includes a scheme # so setup a new connection and authenticate if allow_release_conn: self._api_client.release_connection(conn) conn_params = (result.hostname, result.port, result.scheme == "https") conn = self._api_client.acquire_redirect_connection(conn_params, True, self._headers) if result.query: url = "%s?%s" % (result.path, result.query) else: url = result.path return (conn, url) def _rid(self): '''Return current request id.''' return self._request_id @property def request_error(self): '''Return any errors associated with this instance.''' return self._request_error def _request_str(self, conn, url): '''Return string representation of connection.''' return "%s %s/%s" % (self._method, api_client.ctrl_conn_to_str(conn), url) vmware-nsx-12.0.1/vmware_nsx/api_client/client.py0000666000175100017510000001313013244523345022063 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from six.moves import http_client as httplib from vmware_nsx.api_client import base from vmware_nsx.api_client import eventlet_client from vmware_nsx.api_client import eventlet_request from vmware_nsx.api_client import exception from vmware_nsx.api_client import version LOG = logging.getLogger(__name__) class NsxApiClient(eventlet_client.EventletApiClient): """The Nsx API Client.""" def __init__(self, api_providers, user, password, concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS, gen_timeout=base.GENERATION_ID_TIMEOUT, use_https=True, connect_timeout=base.DEFAULT_CONNECT_TIMEOUT, http_timeout=75, retries=2, redirects=2): '''Constructor. Adds the following: :param http_timeout: how long to wait before aborting an unresponsive controller (and allow for retries to another controller in the cluster) :param retries: the number of concurrent connections. :param redirects: the number of concurrent connections. ''' super(NsxApiClient, self).__init__( api_providers, user, password, concurrent_connections=concurrent_connections, gen_timeout=gen_timeout, use_https=use_https, connect_timeout=connect_timeout) self._request_timeout = http_timeout * retries self._http_timeout = http_timeout self._retries = retries self._redirects = redirects self._version = None # NOTE(salvatore-orlando): This method is not used anymore. Login is now # performed automatically inside the request eventlet if necessary. def login(self, user=None, password=None): '''Login to NSX controller. Assumes same password is used for all controllers. 
:param user: controller user (usually admin). Provided for backwards compatibility. In the normal mode of operation this should be None. :param password: controller password. Provided for backwards compatibility. In the normal mode of operation this should be None. ''' if user: self._user = user if password: self._password = password return self._login() def request(self, method, url, body="", content_type="application/json"): '''Issues request to controller.''' g = eventlet_request.GenericRequestEventlet( self, method, url, body, content_type, auto_login=True, http_timeout=self._http_timeout, retries=self._retries, redirects=self._redirects) g.start() response = g.join() LOG.debug('Request returns "%s"', response) # response is a modified HTTPResponse object or None. # response.read() will not work on response as the underlying library # request_eventlet.ApiRequestEventlet has already called this # method in order to extract the body and headers for processing. # ApiRequestEventlet derived classes call .read() and # .getheaders() on the HTTPResponse objects and store the results in # the response object's .body and .headers data members for future # access. if response is None: # Timeout. LOG.error('Request timed out: %(method)s to %(url)s', {'method': method, 'url': url}) raise exception.RequestTimeout() status = response.status if status == httplib.UNAUTHORIZED: raise exception.UnAuthorizedRequest() # Fail-fast: Check for exception conditions and raise the # appropriate exceptions for known error codes. if status in exception.ERROR_MAPPINGS: LOG.error("Received error code: %s", status) LOG.error("Server Error Message: %s", response.body) exception.ERROR_MAPPINGS[status](response) # Continue processing for non-error condition. 
if (status != httplib.OK and status != httplib.CREATED and status != httplib.NO_CONTENT): LOG.error("%(method)s to %(url)s, unexpected response code: " "%(status)d (content = '%(body)s')", {'method': method, 'url': url, 'status': response.status, 'body': response.body}) return None if not self._version: self._version = version.find_version(response.headers) return response.body def get_version(self): if not self._version: # Determine the controller version by querying the # cluster nodes. Currently, the version will be the # one of the server that responds. self.request('GET', '/ws.v1/control-cluster/node') if not self._version: LOG.error('Unable to determine NSX version. ' 'Plugin might not work as expected.') return self._version vmware-nsx-12.0.1/vmware_nsx/api_client/__init__.py0000666000175100017510000000205613244523345022351 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from six.moves import http_client as httplib from vmware_nsx._i18n import _ def ctrl_conn_to_str(conn): """Returns a string representing a connection URL to the controller.""" if isinstance(conn, httplib.HTTPSConnection): proto = "https://" elif isinstance(conn, httplib.HTTPConnection): proto = "http://" else: raise TypeError(_('Invalid connection type: %s') % type(conn)) return "%s%s:%s" % (proto, conn.host, conn.port) vmware-nsx-12.0.1/vmware_nsx/api_client/eventlet_request.py0000666000175100017510000002147113244523345024212 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet from oslo_log import log as logging from oslo_serialization import jsonutils from six.moves import http_client as httplib from six.moves.urllib import parse from vmware_nsx._i18n import _ from vmware_nsx.api_client import request LOG = logging.getLogger(__name__) USER_AGENT = "Neutron eventlet client/2.0" class EventletApiRequest(request.ApiRequest): '''Eventlet-based ApiRequest class. This class will form the basis for eventlet-based ApiRequest classes ''' # Maximum number of green threads present in the system at one time. API_REQUEST_POOL_SIZE = request.DEFAULT_API_REQUEST_POOL_SIZE # Pool of green threads. One green thread is allocated per incoming # request. Incoming requests will block when the pool is empty. 
API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE) # A unique id is assigned to each incoming request. When the current # request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0. MAXIMUM_REQUEST_ID = request.DEFAULT_MAXIMUM_REQUEST_ID # The request id for the next incoming request. CURRENT_REQUEST_ID = 0 def __init__(self, client_obj, url, method="GET", body=None, headers=None, retries=request.DEFAULT_RETRIES, auto_login=True, redirects=request.DEFAULT_REDIRECTS, http_timeout=request.DEFAULT_HTTP_TIMEOUT, client_conn=None): '''Constructor.''' self._api_client = client_obj self._url = url self._method = method self._body = body self._headers = headers or {} self._request_timeout = http_timeout * retries self._retries = retries self._auto_login = auto_login self._redirects = redirects self._http_timeout = http_timeout self._client_conn = client_conn self._request_error = None if "User-Agent" not in self._headers: self._headers["User-Agent"] = USER_AGENT self._green_thread = None # Retrieve and store this instance's unique request id. self._request_id = EventletApiRequest.CURRENT_REQUEST_ID # Update the class variable that tracks request id. 
# Request IDs wrap around at MAXIMUM_REQUEST_ID next_request_id = self._request_id + 1 next_request_id %= self.MAXIMUM_REQUEST_ID EventletApiRequest.CURRENT_REQUEST_ID = next_request_id @classmethod def _spawn(cls, func, *args, **kwargs): '''Allocate a green thread from the class pool.''' return cls.API_REQUEST_POOL.spawn(func, *args, **kwargs) def spawn(self, func, *args, **kwargs): '''Spawn a new green thread with the supplied function and args.''' return self.__class__._spawn(func, *args, **kwargs) @classmethod def joinall(cls): '''Wait for all outstanding requests to complete.''' return cls.API_REQUEST_POOL.waitall() def join(self): '''Wait for instance green thread to complete.''' if self._green_thread is not None: return self._green_thread.wait() return Exception(_('Joining an invalid green thread')) def start(self): '''Start request processing.''' self._green_thread = self.spawn(self._run) def copy(self): '''Return a copy of this request instance.''' return EventletApiRequest( self._api_client, self._url, self._method, self._body, self._headers, self._retries, self._auto_login, self._redirects, self._http_timeout) def _run(self): '''Method executed within green thread.''' if self._request_timeout: # No timeout exception escapes the with block. with eventlet.timeout.Timeout(self._request_timeout, False): return self._handle_request() LOG.info('[%d] Request timeout.', self._rid()) self._request_error = Exception(_('Request timeout')) return None else: return self._handle_request() def _handle_request(self): '''First level request handling.''' attempt = 0 timeout = 0 response = None while response is None and attempt <= self._retries: eventlet.greenthread.sleep(timeout) attempt += 1 req = self._issue_request() # automatically raises any exceptions returned. 
if isinstance(req, httplib.HTTPResponse): timeout = 0 if attempt <= self._retries: if req.status in (httplib.UNAUTHORIZED, httplib.FORBIDDEN): continue elif req.status == httplib.SERVICE_UNAVAILABLE: timeout = 0.5 continue # else fall through to return the error code LOG.debug("[%(rid)d] Completed request '%(method)s %(url)s'" ": %(status)s", {'rid': self._rid(), 'method': self._method, 'url': self._url, 'status': req.status}) self._request_error = None response = req else: LOG.info('[%(rid)d] Error while handling request: ' '%(req)s', {'rid': self._rid(), 'req': req}) self._request_error = req response = None return response class LoginRequestEventlet(EventletApiRequest): '''Process a login request.''' def __init__(self, client_obj, user, password, client_conn=None, headers=None): if headers is None: headers = {} headers.update({"Content-Type": "application/x-www-form-urlencoded"}) body = parse.urlencode({"username": user, "password": password}) super(LoginRequestEventlet, self).__init__( client_obj, "/ws.v1/login", "POST", body, headers, auto_login=False, client_conn=client_conn) def session_cookie(self): if self.successful(): return self.value.getheader("Set-Cookie") return None class GetApiProvidersRequestEventlet(EventletApiRequest): '''Get a list of API providers.''' def __init__(self, client_obj): url = "/ws.v1/control-cluster/node?fields=roles" super(GetApiProvidersRequestEventlet, self).__init__( client_obj, url, "GET", auto_login=True) def api_providers(self): """Parse api_providers from response. Returns: api_providers in [(host, port, is_ssl), ...] 
format """ def _provider_from_listen_addr(addr): # (pssl|ptcp):: => (host, port, is_ssl) parts = addr.split(':') return (parts[1], int(parts[2]), parts[0] == 'pssl') try: if self.successful(): ret = [] body = jsonutils.loads(self.value.body) for node in body.get('results', []): for role in node.get('roles', []): if role.get('role') == 'api_provider': addr = role.get('listen_addr') if addr: ret.append(_provider_from_listen_addr(addr)) return ret except Exception as e: LOG.warning("[%(rid)d] Failed to parse API provider: %(e)s", {'rid': self._rid(), 'e': e}) # intentionally fall through return None class GenericRequestEventlet(EventletApiRequest): '''Handle a generic request.''' def __init__(self, client_obj, method, url, body, content_type, auto_login=False, http_timeout=request.DEFAULT_HTTP_TIMEOUT, retries=request.DEFAULT_RETRIES, redirects=request.DEFAULT_REDIRECTS): headers = {"Content-Type": content_type} super(GenericRequestEventlet, self).__init__( client_obj, url, method, body, headers, retries=retries, auto_login=auto_login, redirects=redirects, http_timeout=http_timeout) def session_cookie(self): if self.successful(): return self.value.getheader("Set-Cookie") return None request.ApiRequest.register(EventletApiRequest) vmware-nsx-12.0.1/vmware_nsx/api_client/base.py0000666000175100017510000002324013244523345021522 0ustar zuulzuul00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import time from oslo_config import cfg from oslo_log import log as logging import six from six.moves import http_client as httplib from vmware_nsx import api_client LOG = logging.getLogger(__name__) GENERATION_ID_TIMEOUT = -1 DEFAULT_CONCURRENT_CONNECTIONS = 3 DEFAULT_CONNECT_TIMEOUT = 5 @six.add_metaclass(abc.ABCMeta) class ApiClientBase(object): """An abstract baseclass for all API client implementations.""" def _create_connection(self, host, port, is_ssl): if is_ssl: return httplib.HTTPSConnection(host, port, timeout=self._connect_timeout) return httplib.HTTPConnection(host, port, timeout=self._connect_timeout) @staticmethod def _conn_params(http_conn): is_ssl = isinstance(http_conn, httplib.HTTPSConnection) return (http_conn.host, http_conn.port, is_ssl) @property def user(self): return self._user @property def password(self): return self._password @property def config_gen(self): # If NSX_gen_timeout is not -1 then: # Maintain a timestamp along with the generation ID. Hold onto the # ID long enough to be useful and block on sequential requests but # not long enough to persist when Onix db is cleared, which resets # the generation ID, causing the DAL to block indefinitely with some # number that's higher than the cluster's value. if self._gen_timeout != -1: ts = self._config_gen_ts if ts is not None: if (time.time() - ts) > self._gen_timeout: return None return self._config_gen @config_gen.setter def config_gen(self, value): if self._config_gen != value: if self._gen_timeout != -1: self._config_gen_ts = time.time() self._config_gen = value def auth_cookie(self, conn): cookie = None data = self._get_provider_data(conn) if data: cookie = data[1] return cookie def set_auth_cookie(self, conn, cookie): data = self._get_provider_data(conn) if data: self._set_provider_data(conn, (data[0], cookie)) def acquire_connection(self, auto_login=True, headers=None, rid=-1): '''Check out an available HTTPConnection instance. Blocks until a connection is available. 
:auto_login: automatically logins before returning conn :headers: header to pass on to login attempt :param rid: request id passed in from request eventlet. :returns: An available HTTPConnection instance or None if no api_providers are configured. ''' if not self._api_providers: LOG.warning("[%d] no API providers currently available.", rid) return None if self._conn_pool.empty(): LOG.debug("[%d] Waiting to acquire API client connection.", rid) priority, conn = self._conn_pool.get() now = time.time() if getattr(conn, 'last_used', now) < now - cfg.CONF.conn_idle_timeout: LOG.info("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f " "seconds; reconnecting.", {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn), 'sec': now - conn.last_used}) conn = self._create_connection(*self._conn_params(conn)) conn.last_used = now conn.priority = priority # stash current priority for release qsize = self._conn_pool.qsize() LOG.debug("[%(rid)d] Acquired connection %(conn)s. %(qsize)d " "connection(s) available.", {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn), 'qsize': qsize}) if auto_login and self.auth_cookie(conn) is None: self._wait_for_login(conn, headers) return conn def release_connection(self, http_conn, bad_state=False, service_unavail=False, rid=-1): '''Mark HTTPConnection instance as available for check-out. :param http_conn: An HTTPConnection instance obtained from this instance. :param bad_state: True if http_conn is known to be in a bad state (e.g. connection fault.) :service_unavail: True if http_conn returned 503 response. :param rid: request id passed in from request eventlet. ''' conn_params = self._conn_params(http_conn) if self._conn_params(http_conn) not in self._api_providers: LOG.debug("[%(rid)d] Released connection %(conn)s is not an " "API provider for the cluster", {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn)}) return elif hasattr(http_conn, "no_release"): return priority = http_conn.priority if bad_state: # Reconnect to provider. 
LOG.warning("[%(rid)d] Connection returned in bad state, " "reconnecting to %(conn)s", {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn)}) http_conn = self._create_connection(*self._conn_params(http_conn)) elif service_unavail: # http_conn returned a service unaviable response, put other # connections to the same controller at end of priority queue, conns = [] while not self._conn_pool.empty(): priority, conn = self._conn_pool.get() if self._conn_params(conn) == conn_params: priority = self._next_conn_priority self._next_conn_priority += 1 conns.append((priority, conn)) for priority, conn in conns: self._conn_pool.put((priority, conn)) # put http_conn at end of queue also priority = self._next_conn_priority self._next_conn_priority += 1 self._conn_pool.put((priority, http_conn)) LOG.debug("[%(rid)d] Released connection %(conn)s. %(qsize)d " "connection(s) available.", {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn), 'qsize': self._conn_pool.qsize()}) def _wait_for_login(self, conn, headers=None): '''Block until a login has occurred for the current API provider.''' data = self._get_provider_data(conn) if data is None: LOG.error("Login request for an invalid connection: '%s'", api_client.ctrl_conn_to_str(conn)) return provider_sem = data[0] if provider_sem.acquire(blocking=False): try: cookie = self._login(conn, headers) self.set_auth_cookie(conn, cookie) finally: provider_sem.release() else: LOG.debug("Waiting for auth to complete") # Wait until we can acquire then release provider_sem.acquire(blocking=True) provider_sem.release() def _get_provider_data(self, conn_or_conn_params, default=None): """Get data for specified API provider. Args: conn_or_conn_params: either a HTTP(S)Connection object or the resolved conn_params tuple returned by self._conn_params(). 
default: conn_params if ones passed aren't known Returns: Data associated with specified provider """ conn_params = self._normalize_conn_params(conn_or_conn_params) return self._api_provider_data.get(conn_params, default) def _set_provider_data(self, conn_or_conn_params, data): """Set data for specified API provider. Args: conn_or_conn_params: either a HTTP(S)Connection object or the resolved conn_params tuple returned by self._conn_params(). data: data to associate with API provider """ conn_params = self._normalize_conn_params(conn_or_conn_params) if data is None: del self._api_provider_data[conn_params] else: self._api_provider_data[conn_params] = data def _normalize_conn_params(self, conn_or_conn_params): """Normalize conn_param tuple. Args: conn_or_conn_params: either a HTTP(S)Connection object or the resolved conn_params tuple returned by self._conn_params(). Returns: Normalized conn_param tuple """ if (not isinstance(conn_or_conn_params, tuple) and not isinstance(conn_or_conn_params, httplib.HTTPConnection)): LOG.debug("Invalid conn_params value: '%s'", str(conn_or_conn_params)) return conn_or_conn_params if isinstance(conn_or_conn_params, httplib.HTTPConnection): conn_params = self._conn_params(conn_or_conn_params) else: conn_params = conn_or_conn_params host, port, is_ssl = conn_params if port is None: port = 443 if is_ssl else 80 return (host, port, is_ssl) vmware-nsx-12.0.1/vmware_nsx/dhcp_meta/0000775000175100017510000000000013244524600020043 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/dhcp_meta/rpc.py0000666000175100017510000002471113244523345021215 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from eventlet import greenthread from neutron_lib import constants as const from neutron_lib import exceptions as ntn_exc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.db import db_base_plugin_v2 from neutron.db import models_v2 from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc LOG = logging.getLogger(__name__) METADATA_DEFAULT_PREFIX = 30 METADATA_SUBNET_CIDR = '169.254.169.252/%d' % METADATA_DEFAULT_PREFIX METADATA_GATEWAY_IP = '169.254.169.253' METADATA_DHCP_ROUTE = '169.254.169.254/32' def handle_network_dhcp_access(plugin, context, network, action): pass def handle_port_dhcp_access(plugin, context, port_data, action): pass def handle_port_metadata_access(plugin, context, port, is_delete=False): # For instances supporting DHCP option 121 and created in a # DHCP-enabled but isolated network. This method is useful # only when no network namespace support. plugin_cfg = getattr(cfg.CONF, plugin.cfg_group) if (plugin_cfg.metadata_mode == config.MetadataModes.INDIRECT and port.get('device_owner') == const.DEVICE_OWNER_DHCP): if not port.get('fixed_ips'): # If port does not have an IP, the associated subnet is in # deleting state. 
LOG.info('Port %s has no IP due to subnet in deleting state', port['id']) return fixed_ip = port['fixed_ips'][0] query = context.session.query(models_v2.Subnet) subnet = query.filter( models_v2.Subnet.id == fixed_ip['subnet_id']).one() # If subnet does not have a gateway, do not create metadata # route. This is done via the enable_isolated_metadata # option if desired. if not subnet.get('gateway_ip'): LOG.info('Subnet %s does not have a gateway, the ' 'metadata route will not be created', subnet['id']) return metadata_routes = [r for r in subnet.routes if r['destination'] == METADATA_DHCP_ROUTE] if metadata_routes: # We should have only a single metadata route at any time # because the route logic forbids two routes with the same # destination. Update next hop with the provided IP address if not is_delete: metadata_routes[0].nexthop = fixed_ip['ip_address'] else: context.session.delete(metadata_routes[0]) else: # add the metadata route route = models_v2.SubnetRoute( subnet_id=subnet.id, destination=METADATA_DHCP_ROUTE, nexthop=fixed_ip['ip_address']) context.session.add(route) def handle_router_metadata_access(plugin, context, router_id, interface=None): # For instances created in a DHCP-disabled network but connected to # a router. # The parameter "interface" is only used as a Boolean flag to indicate # whether to add (True) or delete (False) an internal metadata network. 
plugin_cfg = getattr(cfg.CONF, plugin.cfg_group) if plugin_cfg.metadata_mode != config.MetadataModes.DIRECT: LOG.debug("Metadata access network is disabled") return if not cfg.CONF.allow_overlapping_ips: LOG.warning("Overlapping IPs must be enabled in order to setup " "the metadata access network") return ctx_elevated = context.elevated() on_demand = getattr(plugin_cfg, 'metadata_on_demand', False) try: if interface: # Add interface case filters = {'device_id': [router_id], 'device_owner': const.ROUTER_INTERFACE_OWNERS, 'fixed_ips': {'ip_address': [METADATA_GATEWAY_IP]}} # Retrieve metadata ports by calling database plugin ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports( plugin, ctx_elevated, filters=filters) if not ports and (not on_demand or _find_dhcp_disabled_subnet_by_router( plugin, ctx_elevated, router_id)): _create_metadata_access_network( plugin, ctx_elevated, router_id) else: # Remove interface case filters = {'device_id': [router_id], 'device_owner': const.ROUTER_INTERFACE_OWNERS} # Retrieve router interface ports by calling database plugin ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports( plugin, ctx_elevated, filters=filters) if len(ports) == 1 or (on_demand and not _find_dhcp_disabled_subnet_by_port( plugin, ctx_elevated, ports)): # Delete the internal metadata network if the router port # is the last port left or no more DHCP-disabled subnet # attached to the router. 
_destroy_metadata_access_network( plugin, ctx_elevated, router_id, ports) # TODO(salvatore-orlando): A better exception handling in the # NSX plugin would allow us to improve error handling here except (ntn_exc.NeutronException, nsx_exc.NsxPluginException, api_exc.NsxApiException): # Any exception here should be regarded as non-fatal LOG.exception("An error occurred while operating on the " "metadata access network for router:'%s'", router_id) def _find_metadata_port(plugin, context, ports): for port in ports: for fixed_ip in port['fixed_ips']: if fixed_ip['ip_address'] == METADATA_GATEWAY_IP: return port def _find_dhcp_disabled_subnet_by_port(plugin, context, ports): for port in ports: for fixed_ip in port['fixed_ips']: subnet = plugin.get_subnet(context, fixed_ip['subnet_id']) if not subnet['enable_dhcp']: return subnet def _find_dhcp_disabled_subnet_by_router(plugin, context, router_id): filters = {'device_id': [router_id], 'device_owner': const.ROUTER_INTERFACE_OWNERS} ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports( plugin, context, filters=filters) return _find_dhcp_disabled_subnet_by_port(plugin, context, ports) def _create_metadata_access_network(plugin, context, router_id): # Add network # Network name is likely to be truncated on NSX net_data = {'name': 'meta-%s' % router_id, 'tenant_id': '', # intentionally not set 'admin_state_up': True, 'port_security_enabled': False, 'shared': False, 'status': const.NET_STATUS_ACTIVE} meta_net = plugin.create_network(context, {'network': net_data}) greenthread.sleep(0) # yield plugin.schedule_network(context, meta_net) greenthread.sleep(0) # yield # From this point on there will be resources to garbage-collect # in case of failures meta_sub = None try: # Add subnet subnet_data = {'network_id': meta_net['id'], 'tenant_id': '', # intentionally not set 'name': 'meta-%s' % router_id, 'ip_version': 4, 'shared': False, 'cidr': METADATA_SUBNET_CIDR, 'enable_dhcp': True, # Ensure default allocation pool is generated 
'allocation_pools': const.ATTR_NOT_SPECIFIED, 'gateway_ip': METADATA_GATEWAY_IP, 'dns_nameservers': [], 'host_routes': []} meta_sub = plugin.create_subnet(context, {'subnet': subnet_data}) greenthread.sleep(0) # yield plugin.add_router_interface(context, router_id, {'subnet_id': meta_sub['id']}) greenthread.sleep(0) # yield # Tell to start the metadata agent proxy, only if we had success _notify_rpc_agent(context, {'subnet': meta_sub}, 'subnet.create.end') except (ntn_exc.NeutronException, nsx_exc.NsxPluginException, api_exc.NsxApiException): # It is not necessary to explicitly delete the subnet # as it will be removed with the network plugin.delete_network(context, meta_net['id']) def _destroy_metadata_access_network(plugin, context, router_id, ports): if not ports: return meta_port = _find_metadata_port(plugin, context, ports) if not meta_port: return meta_net_id = meta_port['network_id'] meta_sub_id = meta_port['fixed_ips'][0]['subnet_id'] plugin.remove_router_interface( context, router_id, {'port_id': meta_port['id']}) greenthread.sleep(0) # yield context.session.expunge_all() try: # Remove network (this will remove the subnet too) plugin.delete_network(context, meta_net_id) greenthread.sleep(0) # yield except (ntn_exc.NeutronException, nsx_exc.NsxPluginException, api_exc.NsxApiException): # must re-add the router interface plugin.add_router_interface(context, router_id, {'subnet_id': meta_sub_id}) except db_exc.DBReferenceError as e: LOG.debug("Unable to delete network %s. 
Reason: %s", meta_net_id, e) # Tell to stop the metadata agent proxy _notify_rpc_agent( context, {'network': {'id': meta_net_id}}, 'network.delete.end') def _notify_rpc_agent(context, payload, event): if cfg.CONF.dhcp_agent_notification: dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI() dhcp_notifier.notify(context, payload, event) vmware-nsx-12.0.1/vmware_nsx/dhcp_meta/combined.py0000666000175100017510000001013713244523345022206 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.common import topics from neutron_lib import constants as const from vmware_nsx.dhcp_meta import nsx as nsx_svc from vmware_nsx.dhcp_meta import rpc as nsx_rpc class DhcpAgentNotifyAPI(dhcp_rpc_agent_api.DhcpAgentNotifyAPI): def __init__(self, plugin, manager): super(DhcpAgentNotifyAPI, self).__init__(topic=topics.DHCP_AGENT) self.agentless_notifier = nsx_svc.DhcpAgentNotifyAPI(plugin, manager) def notify(self, context, data, methodname): [resource, action, _e] = methodname.split('.') lsn_manager = self.agentless_notifier.plugin.lsn_manager plugin = self.agentless_notifier.plugin if resource == 'network': net_id = data['network']['id'] elif resource in ['port', 'subnet']: net_id = data[resource]['network_id'] else: # no valid resource return lsn_exists = lsn_manager.lsn_exists(context, net_id) treat_dhcp_owner_specially = False if lsn_exists: # if lsn exists, the network is one created with the new model if (resource == 'subnet' and action == 'create' and const.DEVICE_OWNER_DHCP not in plugin.port_special_owners): # network/subnet provisioned in the new model have a plain # nsx lswitch port, no vif attachment plugin.port_special_owners.append(const.DEVICE_OWNER_DHCP) treat_dhcp_owner_specially = True if (resource == 'port' and action == 'update' or resource == 'subnet'): self.agentless_notifier.notify(context, data, methodname) elif not lsn_exists and resource in ['port', 'subnet']: # call notifier for the agent-based mode super(DhcpAgentNotifyAPI, self).notify(context, data, methodname) if treat_dhcp_owner_specially: # if subnets belong to networks created with the old model # dhcp port does not need to be special cased, so put things # back, since they were modified plugin.port_special_owners.remove(const.DEVICE_OWNER_DHCP) def handle_network_dhcp_access(plugin, context, network, action): nsx_svc.handle_network_dhcp_access(plugin, context, network, action) def 
handle_port_dhcp_access(plugin, context, port, action): if plugin.lsn_manager.lsn_exists(context, port['network_id']): nsx_svc.handle_port_dhcp_access(plugin, context, port, action) else: nsx_rpc.handle_port_dhcp_access(plugin, context, port, action) def handle_port_metadata_access(plugin, context, port, is_delete=False): if plugin.lsn_manager.lsn_exists(context, port['network_id']): nsx_svc.handle_port_metadata_access(plugin, context, port, is_delete) else: nsx_rpc.handle_port_metadata_access(plugin, context, port, is_delete) def handle_router_metadata_access(plugin, context, router_id, interface=None): if interface: subnet = plugin.get_subnet(context, interface['subnet_id']) network_id = subnet['network_id'] if plugin.lsn_manager.lsn_exists(context, network_id): nsx_svc.handle_router_metadata_access( plugin, context, router_id, interface) else: nsx_rpc.handle_router_metadata_access( plugin, context, router_id, interface) else: nsx_rpc.handle_router_metadata_access( plugin, context, router_id, interface) vmware-nsx-12.0.1/vmware_nsx/dhcp_meta/__init__.py0000666000175100017510000000000013244523345022151 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/dhcp_meta/nsx.py0000666000175100017510000003342313244523345021241 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib import constants as const from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron.db import db_base_plugin_v2 from neutron.db import l3_db from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as p_exc from vmware_nsx.dhcp_meta import constants as d_const from vmware_nsx.nsxlib.mh import lsn as lsn_api LOG = logging.getLogger(__name__) dhcp_opts = [ cfg.ListOpt('extra_domain_name_servers', deprecated_group='NVP_DHCP', default=[], help=_('Comma separated list of additional ' 'domain name servers')), cfg.StrOpt('domain_name', deprecated_group='NVP_DHCP', default='openstacklocal', help=_('Domain to use for building the hostnames')), cfg.IntOpt('default_lease_time', default=43200, deprecated_group='NVP_DHCP', help=_("Default DHCP lease time")), ] metadata_opts = [ cfg.StrOpt('metadata_server_address', deprecated_group='NVP_METADATA', default='127.0.0.1', help=_("IP address used by Metadata server.")), cfg.PortOpt('metadata_server_port', deprecated_group='NVP_METADATA', default=8775, help=_("TCP Port used by Metadata server.")), cfg.StrOpt('metadata_shared_secret', deprecated_group='NVP_METADATA', default='', help=_('When proxying metadata requests, Neutron signs the ' 'Instance-ID header with a shared secret to prevent ' 'spoofing. 
You may select any string for a secret, ' 'but it MUST match with the configuration used by the ' 'Metadata server.'), secret=True) ] def register_dhcp_opts(config): config.CONF.register_opts(dhcp_opts, group="NSX_DHCP") def register_metadata_opts(config): config.CONF.register_opts(metadata_opts, group="NSX_METADATA") class DhcpAgentNotifyAPI(object): def __init__(self, plugin, lsn_manager): self.plugin = plugin self.lsn_manager = lsn_manager self._handle_subnet_dhcp_access = {'create': self._subnet_create, 'update': self._subnet_update, 'delete': self._subnet_delete} def notify(self, context, data, methodname): [resource, action, _e] = methodname.split('.') if resource == 'subnet': self._handle_subnet_dhcp_access[action](context, data['subnet']) elif resource == 'port' and action == 'update': self._port_update(context, data['port']) def _port_update(self, context, port): # With no fixed IP's there's nothing that can be updated if not port["fixed_ips"]: return network_id = port['network_id'] subnet_id = port["fixed_ips"][0]['subnet_id'] filters = {'network_id': [network_id]} # Because NSX does not support updating a single host entry we # got to build the whole list from scratch and update in bulk ports = self.plugin.get_ports(context, filters) if not ports: return dhcp_conf = [ {'mac_address': p['mac_address'], 'ip_address': p["fixed_ips"][0]['ip_address']} for p in ports if is_user_port(p) ] meta_conf = [ {'instance_id': p['device_id'], 'ip_address': p["fixed_ips"][0]['ip_address']} for p in ports if is_user_port(p, check_dev_id=True) ] self.lsn_manager.lsn_port_update( context, network_id, subnet_id, dhcp=dhcp_conf, meta=meta_conf) def _subnet_create(self, context, subnet, clean_on_err=True): if subnet['enable_dhcp']: network_id = subnet['network_id'] # Create port for DHCP service dhcp_port = { "name": "", "admin_state_up": True, "device_id": "", "device_owner": const.DEVICE_OWNER_DHCP, "network_id": network_id, "tenant_id": subnet["tenant_id"], "mac_address": 
const.ATTR_NOT_SPECIFIED, "fixed_ips": [{"subnet_id": subnet['id']}] } try: # This will end up calling handle_port_dhcp_access # down below as well as handle_port_metadata_access self.plugin.create_port(context, {'port': dhcp_port}) except p_exc.PortConfigurationError as e: LOG.error("Error while creating subnet %(cidr)s for " "network %(network)s. Please, contact " "administrator", {"cidr": subnet["cidr"], "network": network_id}) db_base_plugin_v2.NeutronDbPluginV2.delete_port( self.plugin, context, e.port_id) if clean_on_err: self.plugin.delete_subnet(context, subnet['id']) raise n_exc.Conflict() def _subnet_update(self, context, subnet): network_id = subnet['network_id'] try: lsn_id, lsn_port_id = self.lsn_manager.lsn_port_get( context, network_id, subnet['id']) self.lsn_manager.lsn_port_dhcp_configure( context, lsn_id, lsn_port_id, subnet) except p_exc.LsnPortNotFound: # It's possible that the subnet was created with dhcp off; # check if the subnet was uplinked onto a router, and if so # remove the patch attachment between the metadata port and # the lsn port, in favor on the one we'll be creating during # _subnet_create self.lsn_manager.lsn_port_dispose( context, network_id, d_const.METADATA_MAC) # also, check that a dhcp port exists first and provision it # accordingly filters = dict(network_id=[network_id], device_owner=[const.DEVICE_OWNER_DHCP]) ports = self.plugin.get_ports(context, filters=filters) if ports: handle_port_dhcp_access( self.plugin, context, ports[0], 'create_port') else: self._subnet_create(context, subnet, clean_on_err=False) def _subnet_delete(self, context, subnet): # FIXME(armando-migliaccio): it looks like that a subnet filter # is ineffective; so filter by network for now. 
network_id = subnet['network_id'] filters = dict(network_id=[network_id], device_owner=[const.DEVICE_OWNER_DHCP]) # FIXME(armando-migliaccio): this may be race-y ports = self.plugin.get_ports(context, filters=filters) if ports: # This will end up calling handle_port_dhcp_access # down below as well as handle_port_metadata_access self.plugin.delete_port(context, ports[0]['id']) def is_user_port(p, check_dev_id=False): usable = p['fixed_ips'] and p['device_owner'] not in d_const.SPECIAL_OWNERS return usable if not check_dev_id else usable and p['device_id'] def check_services_requirements(cluster): ver = cluster.api_client.get_version() # 4.1 is the first and only release where DHCP in NSX # will have this feature, as experimental if ver.major == 4 and ver.minor == 1: cluster_id = cfg.CONF.default_service_cluster_uuid if not lsn_api.service_cluster_exists(cluster, cluster_id): raise p_exc.ServiceClusterUnavailable(cluster_id=cluster_id) else: raise p_exc.InvalidVersion(version=ver) def handle_network_dhcp_access(plugin, context, network, action): LOG.info("Performing DHCP %(action)s for resource: %(resource)s", {"action": action, "resource": network}) if action == 'create_network': network_id = network['id'] if network.get(extnet_apidef.EXTERNAL): LOG.info("Network %s is external: no LSN to create", network_id) return plugin.lsn_manager.lsn_create(context, network_id) elif action == 'delete_network': # NOTE(armando-migliaccio): on delete_network, network # is just the network id network_id = network plugin.lsn_manager.lsn_delete_by_network(context, network_id) LOG.info("Logical Services Node for network " "%s configured successfully", network_id) def handle_port_dhcp_access(plugin, context, port, action): LOG.info("Performing DHCP %(action)s for resource: %(resource)s", {"action": action, "resource": port}) if port["device_owner"] == const.DEVICE_OWNER_DHCP: network_id = port["network_id"] if action == "create_port": # at this point the port must have a subnet and a 
fixed ip subnet_id = port["fixed_ips"][0]['subnet_id'] subnet = plugin.get_subnet(context, subnet_id) subnet_data = { "mac_address": port["mac_address"], "ip_address": subnet['cidr'], "subnet_id": subnet['id'] } try: plugin.lsn_manager.lsn_port_dhcp_setup( context, network_id, port['id'], subnet_data, subnet) except p_exc.PortConfigurationError: LOG.error("Error while configuring DHCP for " "port %s", port['id']) raise n_exc.NeutronException() elif action == "delete_port": plugin.lsn_manager.lsn_port_dispose(context, network_id, port['mac_address']) elif port["device_owner"] != const.DEVICE_OWNER_DHCP: if port.get("fixed_ips"): # do something only if there are IP's and dhcp is enabled subnet_id = port["fixed_ips"][0]['subnet_id'] if not plugin.get_subnet(context, subnet_id)['enable_dhcp']: LOG.info("DHCP is disabled for subnet %s: nothing " "to do", subnet_id) return host_data = { "mac_address": port["mac_address"], "ip_address": port["fixed_ips"][0]['ip_address'] } network_id = port["network_id"] if action == "create_port": handler = plugin.lsn_manager.lsn_port_dhcp_host_add elif action == "delete_port": handler = plugin.lsn_manager.lsn_port_dhcp_host_remove try: handler(context, network_id, subnet_id, host_data) except p_exc.PortConfigurationError: with excutils.save_and_reraise_exception(): if action == 'create_port': db_base_plugin_v2.NeutronDbPluginV2.delete_port( plugin, context, port['id']) LOG.info("DHCP for port %s configured successfully", port['id']) def handle_port_metadata_access(plugin, context, port, is_delete=False): if is_user_port(port, check_dev_id=True): network_id = port["network_id"] network = plugin.get_network(context, network_id) if network[extnet_apidef.EXTERNAL]: LOG.info("Network %s is external: nothing to do", network_id) return subnet_id = port["fixed_ips"][0]['subnet_id'] host_data = { "instance_id": port["device_id"], "tenant_id": port["tenant_id"], "ip_address": port["fixed_ips"][0]['ip_address'] } LOG.info("Configuring metadata 
entry for port %s", port) if not is_delete: handler = plugin.lsn_manager.lsn_port_meta_host_add else: handler = plugin.lsn_manager.lsn_port_meta_host_remove try: handler(context, network_id, subnet_id, host_data) except p_exc.PortConfigurationError: with excutils.save_and_reraise_exception(): if not is_delete: db_base_plugin_v2.NeutronDbPluginV2.delete_port( plugin, context, port['id']) LOG.info("Metadata for port %s configured successfully", port['id']) def handle_router_metadata_access(plugin, context, router_id, interface=None): LOG.info("Handle metadata access via router: %(r)s and " "interface %(i)s", {'r': router_id, 'i': interface}) if interface: try: plugin.get_port(context, interface['port_id']) is_enabled = True except n_exc.NotFound: is_enabled = False subnet_id = interface['subnet_id'] try: plugin.lsn_manager.lsn_metadata_configure( context, subnet_id, is_enabled) except p_exc.NsxPluginException: with excutils.save_and_reraise_exception(): if is_enabled: l3_db.L3_NAT_db_mixin.remove_router_interface( plugin, context, router_id, interface) LOG.info("Metadata for router %s handled successfully", router_id) vmware-nsx-12.0.1/vmware_nsx/dhcp_meta/lsnmanager.py0000666000175100017510000005265113244523345022564 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as p_exc from vmware_nsx.common import nsx_utils from vmware_nsx.db import lsn_db from vmware_nsx.dhcp_meta import constants as const from vmware_nsx.nsxlib.mh import lsn as lsn_api from vmware_nsx.nsxlib.mh import switch as switch_api LOG = logging.getLogger(__name__) META_CONF = 'metadata-proxy' DHCP_CONF = 'dhcp' lsn_opts = [ cfg.BoolOpt('sync_on_missing_data', default=False, help=_('Pull LSN information from NSX in case it is missing ' 'from the local data store. This is useful to rebuild ' 'the local store in case of server recovery.')) ] def register_lsn_opts(config): config.CONF.register_opts(lsn_opts, "NSX_LSN") class LsnManager(object): """Manage LSN entities associated with networks.""" def __init__(self, plugin): self.plugin = plugin @property def cluster(self): return self.plugin.cluster def lsn_exists(self, context, network_id): """Return True if a Logical Service Node exists for the network.""" return self.lsn_get( context, network_id, raise_on_err=False) is not None def lsn_get(self, context, network_id, raise_on_err=True): """Retrieve the LSN id associated to the network.""" try: return lsn_api.lsn_for_network_get(self.cluster, network_id) except (n_exc.NotFound, api_exc.NsxApiException): if raise_on_err: LOG.error('Unable to find Logical Service Node for ' 'network %s.', network_id) raise p_exc.LsnNotFound(entity='network', entity_id=network_id) else: LOG.warning('Unable to find Logical Service Node for ' 'the requested network %s.', network_id) def lsn_create(self, context, network_id): """Create a LSN associated to the network.""" try: return lsn_api.lsn_for_network_create(self.cluster, network_id) except api_exc.NsxApiException: err_msg = 
_('Unable to create LSN for network %s') % network_id raise p_exc.NsxPluginException(err_msg=err_msg) def lsn_delete(self, context, lsn_id): """Delete a LSN given its id.""" try: lsn_api.lsn_delete(self.cluster, lsn_id) except (n_exc.NotFound, api_exc.NsxApiException): LOG.warning('Unable to delete Logical Service Node %s', lsn_id) def lsn_delete_by_network(self, context, network_id): """Delete a LSN associated to the network.""" lsn_id = self.lsn_get(context, network_id, raise_on_err=False) if lsn_id: self.lsn_delete(context, lsn_id) def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True): """Retrieve LSN and LSN port for the network and the subnet.""" lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err) if lsn_id: try: lsn_port_id = lsn_api.lsn_port_by_subnet_get( self.cluster, lsn_id, subnet_id) except (n_exc.NotFound, api_exc.NsxApiException): if raise_on_err: LOG.error('Unable to find Logical Service Node Port ' 'for LSN %(lsn_id)s and subnet ' '%(subnet_id)s', {'lsn_id': lsn_id, 'subnet_id': subnet_id}) raise p_exc.LsnPortNotFound(lsn_id=lsn_id, entity='subnet', entity_id=subnet_id) else: LOG.warning('Unable to find Logical Service Node Port ' 'for LSN %(lsn_id)s and subnet ' '%(subnet_id)s', {'lsn_id': lsn_id, 'subnet_id': subnet_id}) return (lsn_id, None) else: return (lsn_id, lsn_port_id) else: return (None, None) def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True): """Retrieve LSN and LSN port given network and mac address.""" lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err) if lsn_id: try: lsn_port_id = lsn_api.lsn_port_by_mac_get( self.cluster, lsn_id, mac) except (n_exc.NotFound, api_exc.NsxApiException): if raise_on_err: LOG.error('Unable to find Logical Service Node Port ' 'for LSN %(lsn_id)s and mac address ' '%(mac)s', {'lsn_id': lsn_id, 'mac': mac}) raise p_exc.LsnPortNotFound(lsn_id=lsn_id, entity='MAC', entity_id=mac) else: LOG.warning('Unable to find Logical Service 
Node ' 'Port for LSN %(lsn_id)s and mac address ' '%(mac)s', {'lsn_id': lsn_id, 'mac': mac}) return (lsn_id, None) else: return (lsn_id, lsn_port_id) else: return (None, None) def lsn_port_create(self, context, lsn_id, subnet_info): """Create and return LSN port for associated subnet.""" try: return lsn_api.lsn_port_create(self.cluster, lsn_id, subnet_info) except n_exc.NotFound: raise p_exc.LsnNotFound(entity='', entity_id=lsn_id) except api_exc.NsxApiException: err_msg = _('Unable to create port for LSN %s') % lsn_id raise p_exc.NsxPluginException(err_msg=err_msg) def lsn_port_delete(self, context, lsn_id, lsn_port_id): """Delete a LSN port from the Logical Service Node.""" try: lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) except (n_exc.NotFound, api_exc.NsxApiException): LOG.warning('Unable to delete LSN Port %s', lsn_port_id) def lsn_port_dispose(self, context, network_id, mac_address): """Delete a LSN port given the network and the mac address.""" lsn_id, lsn_port_id = self.lsn_port_get_by_mac( context, network_id, mac_address, raise_on_err=False) if lsn_port_id: self.lsn_port_delete(context, lsn_id, lsn_port_id) if mac_address == const.METADATA_MAC: try: lswitch_port_id = switch_api.get_port_by_neutron_tag( self.cluster, network_id, const.METADATA_PORT_ID)['uuid'] switch_api.delete_port( self.cluster, network_id, lswitch_port_id) except (n_exc.PortNotFoundOnNetwork, api_exc.NsxApiException): LOG.warning("Metadata port not found while attempting " "to delete it from network %s", network_id) else: LOG.warning("Unable to find Logical Services Node " "Port with MAC %s", mac_address) def lsn_port_dhcp_setup( self, context, network_id, port_id, port_data, subnet_config=None): """Connect network to LSN via specified port and port_data.""" try: lsn_id = None switch_id = nsx_utils.get_nsx_switch_ids( context.session, self.cluster, network_id)[0] lswitch_port_id = switch_api.get_port_by_neutron_tag( self.cluster, switch_id, port_id)['uuid'] lsn_id = 
self.lsn_get(context, network_id) lsn_port_id = self.lsn_port_create(context, lsn_id, port_data) except (n_exc.NotFound, p_exc.NsxPluginException): raise p_exc.PortConfigurationError( net_id=network_id, lsn_id=lsn_id, port_id=port_id) else: try: lsn_api.lsn_port_plug_network( self.cluster, lsn_id, lsn_port_id, lswitch_port_id) except p_exc.LsnConfigurationConflict: self.lsn_port_delete(context, lsn_id, lsn_port_id) raise p_exc.PortConfigurationError( net_id=network_id, lsn_id=lsn_id, port_id=port_id) if subnet_config: self.lsn_port_dhcp_configure( context, lsn_id, lsn_port_id, subnet_config) else: return (lsn_id, lsn_port_id) def lsn_port_metadata_setup(self, context, lsn_id, subnet): """Connect subnet to specified LSN.""" data = { "mac_address": const.METADATA_MAC, "ip_address": subnet['cidr'], "subnet_id": subnet['id'] } network_id = subnet['network_id'] tenant_id = subnet['tenant_id'] lswitch_port_id = None try: switch_id = nsx_utils.get_nsx_switch_ids( context.session, self.cluster, network_id)[0] lswitch_port_id = switch_api.create_lport( self.cluster, switch_id, tenant_id, const.METADATA_PORT_ID, const.METADATA_PORT_NAME, const.METADATA_DEVICE_ID, True)['uuid'] lsn_port_id = self.lsn_port_create(context, lsn_id, data) except (n_exc.NotFound, p_exc.NsxPluginException, api_exc.NsxApiException): raise p_exc.PortConfigurationError( net_id=network_id, lsn_id=lsn_id, port_id=lswitch_port_id) else: try: lsn_api.lsn_port_plug_network( self.cluster, lsn_id, lsn_port_id, lswitch_port_id) except p_exc.LsnConfigurationConflict: self.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) switch_api.delete_port( self.cluster, network_id, lswitch_port_id) raise p_exc.PortConfigurationError( net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) def lsn_port_dhcp_configure(self, context, lsn_id, lsn_port_id, subnet): """Enable/disable dhcp services with the given config options.""" is_enabled = subnet["enable_dhcp"] dhcp_options = { "domain_name": cfg.CONF.NSX_DHCP.domain_name, 
"default_lease_time": cfg.CONF.NSX_DHCP.default_lease_time, } dns_servers = cfg.CONF.NSX_DHCP.extra_domain_name_servers or [] dns_servers.extend(subnet["dns_nameservers"]) if subnet['gateway_ip']: dhcp_options["routers"] = subnet["gateway_ip"] if dns_servers: dhcp_options["domain_name_servers"] = ",".join(dns_servers) if subnet["host_routes"]: dhcp_options["classless_static_routes"] = ( ",".join(subnet["host_routes"]) ) try: lsn_api.lsn_port_dhcp_configure( self.cluster, lsn_id, lsn_port_id, is_enabled, dhcp_options) except (n_exc.NotFound, api_exc.NsxApiException): err_msg = (_('Unable to configure dhcp for Logical Service ' 'Node %(lsn_id)s and port %(lsn_port_id)s') % {'lsn_id': lsn_id, 'lsn_port_id': lsn_port_id}) LOG.error(err_msg) raise p_exc.NsxPluginException(err_msg=err_msg) def lsn_metadata_configure(self, context, subnet_id, is_enabled): """Configure metadata service for the specified subnet.""" subnet = self.plugin.get_subnet(context, subnet_id) network_id = subnet['network_id'] meta_conf = cfg.CONF.NSX_METADATA metadata_options = { 'metadata_server_ip': meta_conf.metadata_server_address, 'metadata_server_port': meta_conf.metadata_server_port, 'metadata_proxy_shared_secret': meta_conf.metadata_shared_secret } try: lsn_id = self.lsn_get(context, network_id) lsn_api.lsn_metadata_configure( self.cluster, lsn_id, is_enabled, metadata_options) except (p_exc.LsnNotFound, api_exc.NsxApiException): err_msg = (_('Unable to configure metadata ' 'for subnet %s') % subnet_id) LOG.error(err_msg) raise p_exc.NsxPluginException(err_msg=err_msg) if is_enabled: try: # test that the lsn port exists self.lsn_port_get(context, network_id, subnet_id) except p_exc.LsnPortNotFound: # this might happen if subnet had dhcp off when created # so create one, and wire it self.lsn_port_metadata_setup(context, lsn_id, subnet) else: self.lsn_port_dispose(context, network_id, const.METADATA_MAC) def _lsn_port_host_conf(self, context, network_id, subnet_id, data, hdlr): lsn_id, 
lsn_port_id = self.lsn_port_get( context, network_id, subnet_id, raise_on_err=False) try: if lsn_id and lsn_port_id: hdlr(self.cluster, lsn_id, lsn_port_id, data) except (n_exc.NotFound, api_exc.NsxApiException): LOG.error('Error while configuring LSN ' 'port %s', lsn_port_id) raise p_exc.PortConfigurationError( net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) def lsn_port_dhcp_host_add(self, context, network_id, subnet_id, host): """Add dhcp host entry to LSN port configuration.""" self._lsn_port_host_conf(context, network_id, subnet_id, host, lsn_api.lsn_port_dhcp_host_add) def lsn_port_dhcp_host_remove(self, context, network_id, subnet_id, host): """Remove dhcp host entry from LSN port configuration.""" self._lsn_port_host_conf(context, network_id, subnet_id, host, lsn_api.lsn_port_dhcp_host_remove) def lsn_port_meta_host_add(self, context, network_id, subnet_id, host): """Add dhcp host entry to LSN port configuration.""" self._lsn_port_host_conf(context, network_id, subnet_id, host, lsn_api.lsn_port_metadata_host_add) def lsn_port_meta_host_remove(self, context, network_id, subnet_id, host): """Remove dhcp host entry from LSN port configuration.""" self._lsn_port_host_conf(context, network_id, subnet_id, host, lsn_api.lsn_port_metadata_host_remove) def lsn_port_update( self, context, network_id, subnet_id, dhcp=None, meta=None): """Update the specified configuration for the LSN port.""" if not dhcp and not meta: return try: lsn_id, lsn_port_id = self.lsn_port_get( context, network_id, subnet_id, raise_on_err=False) if dhcp and lsn_id and lsn_port_id: lsn_api.lsn_port_host_entries_update( self.cluster, lsn_id, lsn_port_id, DHCP_CONF, dhcp) if meta and lsn_id and lsn_port_id: lsn_api.lsn_port_host_entries_update( self.cluster, lsn_id, lsn_port_id, META_CONF, meta) except api_exc.NsxApiException: raise p_exc.PortConfigurationError( net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) class PersistentLsnManager(LsnManager): """Add local persistent state to 
LSN Manager.""" def __init__(self, plugin): super(PersistentLsnManager, self).__init__(plugin) self.sync_on_missing = cfg.CONF.NSX_LSN.sync_on_missing_data def lsn_get(self, context, network_id, raise_on_err=True): try: obj = lsn_db.lsn_get_for_network( context, network_id, raise_on_err=raise_on_err) return obj.lsn_id if obj else None except p_exc.LsnNotFound: with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = False if self.sync_on_missing: lsn_id = super(PersistentLsnManager, self).lsn_get( context, network_id, raise_on_err=raise_on_err) self.lsn_save(context, network_id, lsn_id) return lsn_id if raise_on_err: ctxt.reraise = True def lsn_save(self, context, network_id, lsn_id): """Save LSN-Network mapping to the DB.""" try: lsn_db.lsn_add(context, network_id, lsn_id) except db_exc.DBError: err_msg = _('Unable to save LSN for network %s') % network_id LOG.exception(err_msg) raise p_exc.NsxPluginException(err_msg=err_msg) def lsn_create(self, context, network_id): lsn_id = super(PersistentLsnManager, self).lsn_create(context, network_id) try: self.lsn_save(context, network_id, lsn_id) except p_exc.NsxPluginException: with excutils.save_and_reraise_exception(): super(PersistentLsnManager, self).lsn_delete(context, lsn_id) return lsn_id def lsn_delete(self, context, lsn_id): lsn_db.lsn_remove(context, lsn_id) super(PersistentLsnManager, self).lsn_delete(context, lsn_id) def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True): try: obj = lsn_db.lsn_port_get_for_subnet( context, subnet_id, raise_on_err=raise_on_err) return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None) except p_exc.LsnPortNotFound: with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = False if self.sync_on_missing: lsn_id, lsn_port_id = ( super(PersistentLsnManager, self).lsn_port_get( context, network_id, subnet_id, raise_on_err=raise_on_err)) mac_addr = lsn_api.lsn_port_info_get( self.cluster, lsn_id, lsn_port_id)['mac_address'] 
self.lsn_port_save( context, lsn_port_id, subnet_id, mac_addr, lsn_id) return (lsn_id, lsn_port_id) if raise_on_err: ctxt.reraise = True def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True): try: obj = lsn_db.lsn_port_get_for_mac( context, mac, raise_on_err=raise_on_err) return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None) except p_exc.LsnPortNotFound: with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = False if self.sync_on_missing: lsn_id, lsn_port_id = ( super(PersistentLsnManager, self).lsn_port_get_by_mac( context, network_id, mac, raise_on_err=raise_on_err)) subnet_id = lsn_api.lsn_port_info_get( self.cluster, lsn_id, lsn_port_id).get('subnet_id') self.lsn_port_save( context, lsn_port_id, subnet_id, mac, lsn_id) return (lsn_id, lsn_port_id) if raise_on_err: ctxt.reraise = True def lsn_port_save(self, context, lsn_port_id, subnet_id, mac_addr, lsn_id): """Save LSN Port information to the DB.""" try: lsn_db.lsn_port_add_for_lsn( context, lsn_port_id, subnet_id, mac_addr, lsn_id) except db_exc.DBError: err_msg = _('Unable to save LSN port for subnet %s') % subnet_id LOG.exception(err_msg) raise p_exc.NsxPluginException(err_msg=err_msg) def lsn_port_create(self, context, lsn_id, subnet_info): lsn_port_id = super(PersistentLsnManager, self).lsn_port_create(context, lsn_id, subnet_info) try: self.lsn_port_save(context, lsn_port_id, subnet_info['subnet_id'], subnet_info['mac_address'], lsn_id) except p_exc.NsxPluginException: with excutils.save_and_reraise_exception(): super(PersistentLsnManager, self).lsn_port_delete( context, lsn_id, lsn_port_id) return lsn_port_id def lsn_port_delete(self, context, lsn_id, lsn_port_id): lsn_db.lsn_port_remove(context, lsn_port_id) super(PersistentLsnManager, self).lsn_port_delete( context, lsn_id, lsn_port_id) vmware-nsx-12.0.1/vmware_nsx/dhcp_meta/modes.py0000666000175100017510000001755613244523345021551 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. 
# # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib import constants as const from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import metadata_rpc from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.db import agents_db from vmware_nsx._i18n import _ from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.dhcp_meta import combined from vmware_nsx.dhcp_meta import lsnmanager from vmware_nsx.dhcp_meta import migration from vmware_nsx.dhcp_meta import nsx as nsx_svc from vmware_nsx.dhcp_meta import rpc as nsx_rpc from vmware_nsx.extensions import lsn LOG = logging.getLogger(__name__) class SynchronizedDhcpRpcCallback(dhcp_rpc.DhcpRpcCallback): """DHCP RPC callbakcs synchronized with VMware plugin mutex.""" @lockutils.synchronized('vmware', 'neutron-') def create_dhcp_port(self, context, **kwargs): return super(SynchronizedDhcpRpcCallback, self).create_dhcp_port( context, **kwargs) class DhcpMetadataAccess(object): def setup_dhcpmeta_access(self): """Initialize support for DHCP and Metadata services.""" self._init_extensions() if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENT: self._setup_rpc_dhcp_metadata() mod = nsx_rpc elif 
cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS: self._setup_nsx_dhcp_metadata() mod = nsx_svc elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED: notifier = self._setup_nsx_dhcp_metadata() self._setup_rpc_dhcp_metadata(notifier=notifier) mod = combined else: error = _("Invalid agent_mode: %s") % cfg.CONF.NSX.agent_mode LOG.error(error) raise nsx_exc.NsxPluginException(err_msg=error) self.handle_network_dhcp_access_delegate = ( mod.handle_network_dhcp_access ) self.handle_port_dhcp_access_delegate = ( mod.handle_port_dhcp_access ) self.handle_port_metadata_access_delegate = ( mod.handle_port_metadata_access ) self.handle_metadata_access_delegate = ( mod.handle_router_metadata_access ) def _setup_rpc_dhcp_metadata(self, notifier=None): self.topic = topics.PLUGIN self.conn = n_rpc.create_connection() self.endpoints = [SynchronizedDhcpRpcCallback(), agents_db.AgentExtRpcCallback(), metadata_rpc.MetadataRpcCallback()] self.conn.create_consumer(self.topic, self.endpoints, fanout=False) self.conn.create_consumer(topics.REPORTS, [agents_db.AgentExtRpcCallback()], fanout=False) self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( notifier or dhcp_rpc_agent_api.DhcpAgentNotifyAPI()) self.conn.consume_in_threads() self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver ) self.supported_extension_aliases.extend( ['agent', 'dhcp_agent_scheduler']) def _setup_nsx_dhcp_metadata(self): self._check_services_requirements() nsx_svc.register_dhcp_opts(cfg) nsx_svc.register_metadata_opts(cfg) lsnmanager.register_lsn_opts(cfg) lsn_manager = lsnmanager.PersistentLsnManager(self.safe_reference) self.lsn_manager = lsn_manager if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS: notifier = nsx_svc.DhcpAgentNotifyAPI(self.safe_reference, lsn_manager) self.agent_notifiers[const.AGENT_TYPE_DHCP] = notifier # In agentless mode, ports whose owner is DHCP need to # be special cased; so add it to the list of special # owners list if 
const.DEVICE_OWNER_DHCP not in self.port_special_owners: self.port_special_owners.append(const.DEVICE_OWNER_DHCP) elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED: # This becomes ineffective, as all new networks creations # are handled by Logical Services Nodes in NSX cfg.CONF.set_override('network_auto_schedule', False) LOG.warning('network_auto_schedule has been disabled') notifier = combined.DhcpAgentNotifyAPI(self.safe_reference, lsn_manager) self.supported_extension_aliases.append(lsn.EXT_ALIAS) # Add the capability to migrate dhcp and metadata services over self.migration_manager = ( migration.MigrationManager( self.safe_reference, lsn_manager, notifier)) return notifier def _init_extensions(self): extensions = (lsn.EXT_ALIAS, 'agent', 'dhcp_agent_scheduler') for ext in extensions: if ext in self.supported_extension_aliases: self.supported_extension_aliases.remove(ext) def _check_services_requirements(self): try: error = None nsx_svc.check_services_requirements(self.cluster) except nsx_exc.InvalidVersion: error = _("Unable to run Neutron with config option '%s', as NSX " "does not support it") % cfg.CONF.NSX.agent_mode except nsx_exc.ServiceClusterUnavailable: error = _("Unmet dependency for config option " "'%s'") % cfg.CONF.NSX.agent_mode if error: LOG.error(error) raise nsx_exc.NsxPluginException(err_msg=error) def get_lsn(self, context, network_id, fields=None): report = self.migration_manager.report(context, network_id) return {'network': network_id, 'report': report} def create_lsn(self, context, lsn): network_id = lsn['lsn']['network'] subnet = self.migration_manager.validate(context, network_id) subnet_id = None if not subnet else subnet['id'] self.migration_manager.migrate(context, network_id, subnet) r = self.migration_manager.report(context, network_id, subnet_id) return {'network': network_id, 'report': r} def handle_network_dhcp_access(self, context, network, action): self.handle_network_dhcp_access_delegate(self.safe_reference, 
context, network, action) def handle_port_dhcp_access(self, context, port_data, action): self.handle_port_dhcp_access_delegate(self.safe_reference, context, port_data, action) def handle_port_metadata_access(self, context, port, is_delete=False): self.handle_port_metadata_access_delegate(self.safe_reference, context, port, is_delete) def handle_router_metadata_access(self, context, router_id, interface=None): self.handle_metadata_access_delegate(self.safe_reference, context, router_id, interface) vmware-nsx-12.0.1/vmware_nsx/dhcp_meta/constants.py0000666000175100017510000000216013244523345022437 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron.db import l3_db from neutron_lib import constants as const # A unique MAC to quickly identify the LSN port used for metadata services # when dhcp on the subnet is off. Inspired by leet-speak for 'metadata'. METADATA_MAC = "fa:15:73:74:d4:74" METADATA_PORT_ID = 'metadata:id' METADATA_PORT_NAME = 'metadata:name' METADATA_DEVICE_ID = 'metadata:device' SPECIAL_OWNERS = (const.DEVICE_OWNER_DHCP, const.DEVICE_OWNER_ROUTER_GW, l3_db.DEVICE_OWNER_ROUTER_INTF) vmware-nsx-12.0.1/vmware_nsx/dhcp_meta/migration.py0000666000175100017510000001663613244523345022431 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. 
# # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib import constants as const from neutron_lib import exceptions as n_exc from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as p_exc from vmware_nsx.dhcp_meta import nsx from vmware_nsx.dhcp_meta import rpc LOG = logging.getLogger(__name__) class DhcpMetadataBuilder(object): def __init__(self, plugin, agent_notifier): self.plugin = plugin self.notifier = agent_notifier def dhcp_agent_get_all(self, context, network_id): """Return the agents managing the network.""" return self.plugin.list_dhcp_agents_hosting_network( context, network_id)['agents'] def dhcp_port_get_all(self, context, network_id): """Return the dhcp ports allocated for the network.""" filters = { 'network_id': [network_id], 'device_owner': [const.DEVICE_OWNER_DHCP] } return self.plugin.get_ports(context, filters=filters) def router_id_get(self, context, subnet=None): """Return the router and interface used for the subnet.""" if not subnet: return network_id = subnet['network_id'] filters = { 'network_id': [network_id], 'device_owner': [const.DEVICE_OWNER_ROUTER_INTF] } ports = self.plugin.get_ports(context, filters=filters) for port in ports: if port['fixed_ips'][0]['subnet_id'] == subnet['id']: return port['device_id'] def metadata_deallocate(self, context, router_id, subnet_id): """Deallocate metadata 
services for the subnet.""" interface = {'subnet_id': subnet_id} self.plugin.remove_router_interface(context, router_id, interface) def metadata_allocate(self, context, router_id, subnet_id): """Allocate metadata resources for the subnet via the router.""" interface = {'subnet_id': subnet_id} self.plugin.add_router_interface(context, router_id, interface) def dhcp_deallocate(self, context, network_id, agents, ports): """Deallocate dhcp resources for the network.""" for agent in agents: self.plugin.remove_network_from_dhcp_agent( context, agent['id'], network_id) for port in ports: try: self.plugin.delete_port(context, port['id']) except n_exc.PortNotFound: LOG.error('Port %s is already gone', port['id']) def dhcp_allocate(self, context, network_id, subnet): """Allocate dhcp resources for the subnet.""" # Create LSN resources network_data = {'id': network_id} nsx.handle_network_dhcp_access(self.plugin, context, network_data, 'create_network') if subnet: subnet_data = {'subnet': subnet} self.notifier.notify(context, subnet_data, 'subnet.create.end') # Get DHCP host and metadata entries created for the LSN port = { 'network_id': network_id, 'fixed_ips': [{'subnet_id': subnet['id']}] } self.notifier.notify(context, {'port': port}, 'port.update.end') class MigrationManager(object): def __init__(self, plugin, lsn_manager, agent_notifier): self.plugin = plugin self.manager = lsn_manager self.builder = DhcpMetadataBuilder(plugin, agent_notifier) def validate(self, context, network_id): """Validate and return subnet's dhcp info for migration.""" network = self.plugin.get_network(context, network_id) if self.manager.lsn_exists(context, network_id): reason = _("LSN already exist") raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason) if network[extnet_apidef.EXTERNAL]: reason = _("Cannot migrate an external network") raise n_exc.BadRequest(resource='network', msg=reason) filters = {'network_id': [network_id]} subnets = self.plugin.get_subnets(context, 
filters=filters) count = len(subnets) if count == 0: return None elif count == 1 and subnets[0]['cidr'] == rpc.METADATA_SUBNET_CIDR: reason = _("Cannot migrate a 'metadata' network") raise n_exc.BadRequest(resource='network', msg=reason) elif count > 1: reason = _("Unable to support multiple subnets per network") raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason) else: return subnets[0] def migrate(self, context, network_id, subnet=None): """Migrate subnet resources to LSN.""" router_id = self.builder.router_id_get(context, subnet) if router_id and subnet: # Deallocate resources taken for the router, if any self.builder.metadata_deallocate(context, router_id, subnet['id']) if subnet: # Deallocate reources taken for the agent, if any agents = self.builder.dhcp_agent_get_all(context, network_id) ports = self.builder.dhcp_port_get_all(context, network_id) self.builder.dhcp_deallocate(context, network_id, agents, ports) # (re)create the configuration for LSN self.builder.dhcp_allocate(context, network_id, subnet) if router_id and subnet: # Allocate resources taken for the router, if any self.builder.metadata_allocate(context, router_id, subnet['id']) def report(self, context, network_id, subnet_id=None): """Return a report of the dhcp and metadata resources in use.""" if subnet_id: lsn_id, lsn_port_id = self.manager.lsn_port_get( context, network_id, subnet_id, raise_on_err=False) else: filters = {'network_id': [network_id]} subnets = self.plugin.get_subnets(context, filters=filters) if subnets: lsn_id, lsn_port_id = self.manager.lsn_port_get( context, network_id, subnets[0]['id'], raise_on_err=False) else: lsn_id = self.manager.lsn_get(context, network_id, raise_on_err=False) lsn_port_id = None if lsn_id: ports = [lsn_port_id] if lsn_port_id else [] report = { 'type': 'lsn', 'services': [lsn_id], 'ports': ports } else: agents = self.builder.dhcp_agent_get_all(context, network_id) ports = self.builder.dhcp_port_get_all(context, network_id) report = { 
'type': 'agent', 'services': [a['id'] for a in agents], 'ports': [p['id'] for p in ports] } return report vmware-nsx-12.0.1/vmware_nsx/plugin.py0000666000175100017510000000241613244523345020001 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Note: this import should be here in order to appear before NeutronDbPluginV2 # in each of the plugins. If not: security-group/-rule will not have all the # relevant extend dict registries. from neutron.db.models import securitygroup # noqa from vmware_nsx.plugins.dvs import plugin as dvs from vmware_nsx.plugins.nsx import plugin as nsx from vmware_nsx.plugins.nsx_mh import plugin as nsx_mh from vmware_nsx.plugins.nsx_v import plugin as nsx_v from vmware_nsx.plugins.nsx_v3 import plugin as nsx_v3 NsxDvsPlugin = dvs.NsxDvsV2 NsxPlugin = nsx_mh.NsxPluginV2 NsxVPlugin = nsx_v.NsxVPluginV2 NsxV3Plugin = nsx_v3.NsxV3Plugin NsxTVDPlugin = nsx.NsxTVDPlugin vmware-nsx-12.0.1/vmware_nsx/nsxlib/0000775000175100017510000000000013244524600017416 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/nsxlib/__init__.py0000666000175100017510000000000013244523345021524 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/nsxlib/mh/0000775000175100017510000000000013244524600020022 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/vmware_nsx/nsxlib/mh/l2gateway.py0000666000175100017510000002133213244523345022303 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log from oslo_serialization import jsonutils from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils from vmware_nsx.nsxlib import mh as nsxlib from vmware_nsx.nsxlib.mh import switch HTTP_GET = "GET" HTTP_POST = "POST" HTTP_DELETE = "DELETE" HTTP_PUT = "PUT" GWSERVICE_RESOURCE = "gateway-service" TRANSPORTNODE_RESOURCE = "transport-node" LOG = log.getLogger(__name__) def create_l2_gw_service(cluster, tenant_id, display_name, devices): """Create a NSX Layer-2 Network Gateway Service. :param cluster: The target NSX cluster :param tenant_id: Identifier of the Openstack tenant for which the gateway service. 
:param display_name: Descriptive name of this gateway service :param devices: List of transport node uuids (and network interfaces on them) to use for the network gateway service :raise NsxApiException: if there is a problem while communicating with the NSX controller """ # NOTE(salvatore-orlando): This is a little confusing, but device_id in # NSX is actually the identifier a physical interface on the gateway # device, which in the Neutron API is referred as interface_name gateways = [{"transport_node_uuid": device['id'], "device_id": device['interface_name'], "type": "L2Gateway"} for device in devices] gwservice_obj = { "display_name": utils.check_and_truncate(display_name), "tags": utils.get_tags(os_tid=tenant_id), "gateways": gateways, "type": "L2GatewayServiceConfig" } return nsxlib.do_request( HTTP_POST, nsxlib._build_uri_path(GWSERVICE_RESOURCE), jsonutils.dumps(gwservice_obj), cluster=cluster) def plug_l2_gw_service(cluster, lswitch_id, lport_id, gateway_id, vlan_id=None): """Plug a Layer-2 Gateway Attachment object in a logical port.""" att_obj = {'type': 'L2GatewayAttachment', 'l2_gateway_service_uuid': gateway_id} if vlan_id: att_obj['vlan_id'] = vlan_id return switch.plug_interface(cluster, lswitch_id, lport_id, att_obj) def get_l2_gw_service(cluster, gateway_id): return nsxlib.do_request( HTTP_GET, nsxlib._build_uri_path(GWSERVICE_RESOURCE, resource_id=gateway_id), cluster=cluster) def get_l2_gw_services(cluster, tenant_id=None, fields=None, filters=None): actual_filters = dict(filters or {}) if tenant_id: actual_filters['tag'] = tenant_id actual_filters['tag_scope'] = 'os_tid' return nsxlib.get_all_query_pages( nsxlib._build_uri_path(GWSERVICE_RESOURCE, filters=actual_filters), cluster) def update_l2_gw_service(cluster, gateway_id, display_name): # TODO(salvatore-orlando): Allow updates for gateways too gwservice_obj = get_l2_gw_service(cluster, gateway_id) if not display_name: # Nothing to update return gwservice_obj gwservice_obj["display_name"] = 
utils.check_and_truncate(display_name) return nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(GWSERVICE_RESOURCE, resource_id=gateway_id), jsonutils.dumps(gwservice_obj), cluster=cluster) def delete_l2_gw_service(cluster, gateway_id): nsxlib.do_request(HTTP_DELETE, nsxlib._build_uri_path(GWSERVICE_RESOURCE, resource_id=gateway_id), cluster=cluster) def _build_gateway_device_body(tenant_id, display_name, neutron_id, connector_type, connector_ip, client_certificate, tz_uuid): connector_type_mappings = { utils.NetworkTypes.STT: "STTConnector", utils.NetworkTypes.GRE: "GREConnector", utils.NetworkTypes.BRIDGE: "BridgeConnector", 'ipsec%s' % utils.NetworkTypes.STT: "IPsecSTT", 'ipsec%s' % utils.NetworkTypes.GRE: "IPsecGRE", 'ipsec_%s' % utils.NetworkTypes.STT: "IPsecSTT", 'ipsec_%s' % utils.NetworkTypes.GRE: "IPsecGRE"} nsx_connector_type = connector_type_mappings.get(connector_type) if connector_type and not nsx_connector_type: LOG.error("There is no NSX mapping for connector type %s", connector_type) raise nsx_exc.InvalidTransportType(transport_type=connector_type) body = {"display_name": utils.check_and_truncate(display_name), "tags": utils.get_tags(os_tid=tenant_id, q_gw_dev_id=neutron_id), "admin_status_enabled": True} if connector_ip and nsx_connector_type: body["transport_connectors"] = [ {"transport_zone_uuid": tz_uuid, "ip_address": connector_ip, "type": nsx_connector_type}] if client_certificate: body["credential"] = {"client_certificate": {"pem_encoded": client_certificate}, "type": "SecurityCertificateCredential"} return body def create_gateway_device(cluster, tenant_id, display_name, neutron_id, tz_uuid, connector_type, connector_ip, client_certificate): body = _build_gateway_device_body(tenant_id, display_name, neutron_id, connector_type, connector_ip, client_certificate, tz_uuid) try: return nsxlib.do_request( HTTP_POST, nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE), jsonutils.dumps(body, sort_keys=True), cluster=cluster) except 
api_exc.InvalidSecurityCertificate: raise nsx_exc.InvalidSecurityCertificate() def update_gateway_device(cluster, gateway_id, tenant_id, display_name, neutron_id, tz_uuid, connector_type, connector_ip, client_certificate): body = _build_gateway_device_body(tenant_id, display_name, neutron_id, connector_type, connector_ip, client_certificate, tz_uuid) try: return nsxlib.do_request( HTTP_PUT, nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE, resource_id=gateway_id), jsonutils.dumps(body, sort_keys=True), cluster=cluster) except api_exc.InvalidSecurityCertificate: raise nsx_exc.InvalidSecurityCertificate() def delete_gateway_device(cluster, device_uuid): return nsxlib.do_request(HTTP_DELETE, nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE, device_uuid), cluster=cluster) def get_gateway_device_status(cluster, device_uuid): status_res = nsxlib.do_request(HTTP_GET, nsxlib._build_uri_path( TRANSPORTNODE_RESOURCE, device_uuid, extra_action='status'), cluster=cluster) # Returns the connection status return status_res['connection']['connected'] def get_gateway_devices_status(cluster, tenant_id=None): if tenant_id: gw_device_query_path = nsxlib._build_uri_path( TRANSPORTNODE_RESOURCE, fields="uuid,tags", relations="TransportNodeStatus", filters={'tag': tenant_id, 'tag_scope': 'os_tid'}) else: gw_device_query_path = nsxlib._build_uri_path( TRANSPORTNODE_RESOURCE, fields="uuid,tags", relations="TransportNodeStatus") response = nsxlib.get_all_query_pages(gw_device_query_path, cluster) results = {} for item in response: results[item['uuid']] = (item['_relations']['TransportNodeStatus'] ['connection']['connected']) return results vmware-nsx-12.0.1/vmware_nsx/nsxlib/mh/lsn.py0000666000175100017510000002506413244523345021206 0ustar zuulzuul00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as exception from oslo_log import log from oslo_serialization import jsonutils import six from vmware_nsx._i18n import _ from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils from vmware_nsx.nsxlib import mh as nsxlib HTTP_GET = "GET" HTTP_POST = "POST" HTTP_DELETE = "DELETE" HTTP_PUT = "PUT" SERVICECLUSTER_RESOURCE = "edge-cluster" LSERVICESNODE_RESOURCE = "lservices-node" LSERVICESNODEPORT_RESOURCE = "lport/%s" % LSERVICESNODE_RESOURCE SUPPORTED_METADATA_OPTIONS = ['metadata_proxy_shared_secret'] LOG = log.getLogger(__name__) def service_cluster_exists(cluster, svc_cluster_id): exists = False try: exists = ( svc_cluster_id and nsxlib.do_request(HTTP_GET, nsxlib._build_uri_path( SERVICECLUSTER_RESOURCE, resource_id=svc_cluster_id), cluster=cluster) is not None) except exception.NotFound: pass return exists def lsn_for_network_create(cluster, network_id): lsn_obj = { "edge_cluster_uuid": cluster.default_service_cluster_uuid, "tags": utils.get_tags(n_network_id=network_id) } return nsxlib.do_request(HTTP_POST, nsxlib._build_uri_path(LSERVICESNODE_RESOURCE), jsonutils.dumps(lsn_obj, sort_keys=True), cluster=cluster)["uuid"] def lsn_for_network_get(cluster, network_id): filters = {"tag": network_id, "tag_scope": "n_network_id"} results = nsxlib.do_request(HTTP_GET, nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, fields="uuid", filters=filters), cluster=cluster)['results'] if not results: raise exception.NotFound() elif len(results) == 1: return 
results[0]['uuid'] def lsn_delete(cluster, lsn_id): nsxlib.do_request(HTTP_DELETE, nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, resource_id=lsn_id), cluster=cluster) def lsn_port_host_entries_update( cluster, lsn_id, lsn_port_id, conf, hosts_data): hosts_obj = {'hosts': hosts_data} nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id, resource_id=lsn_port_id, extra_action=conf), jsonutils.dumps(hosts_obj, sort_keys=True), cluster=cluster) def lsn_port_create(cluster, lsn_id, port_data): port_obj = { "ip_address": port_data["ip_address"], "mac_address": port_data["mac_address"], "tags": utils.get_tags(n_mac_address=port_data["mac_address"], n_subnet_id=port_data["subnet_id"]), "type": "LogicalServicesNodePortConfig", } return nsxlib.do_request(HTTP_POST, nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id), jsonutils.dumps(port_obj, sort_keys=True), cluster=cluster)["uuid"] def lsn_port_delete(cluster, lsn_id, lsn_port_id): return nsxlib.do_request(HTTP_DELETE, nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id, resource_id=lsn_port_id), cluster=cluster) def _lsn_port_get(cluster, lsn_id, filters): results = nsxlib.do_request(HTTP_GET, nsxlib._build_uri_path( LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id, fields="uuid", filters=filters), cluster=cluster)['results'] if not results: raise exception.NotFound() elif len(results) == 1: return results[0]['uuid'] def lsn_port_by_mac_get(cluster, lsn_id, mac_address): filters = {"tag": mac_address, "tag_scope": "n_mac_address"} return _lsn_port_get(cluster, lsn_id, filters) def lsn_port_by_subnet_get(cluster, lsn_id, subnet_id): filters = {"tag": subnet_id, "tag_scope": "n_subnet_id"} return _lsn_port_get(cluster, lsn_id, filters) def lsn_port_info_get(cluster, lsn_id, lsn_port_id): result = nsxlib.do_request(HTTP_GET, nsxlib._build_uri_path( LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id, 
resource_id=lsn_port_id), cluster=cluster) for tag in result['tags']: if tag['scope'] == 'n_subnet_id': result['subnet_id'] = tag['tag'] break return result def lsn_port_plug_network(cluster, lsn_id, lsn_port_id, lswitch_port_id): patch_obj = { "type": "PatchAttachment", "peer_port_uuid": lswitch_port_id } try: nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id, resource_id=lsn_port_id, is_attachment=True), jsonutils.dumps(patch_obj, sort_keys=True), cluster=cluster) except api_exc.Conflict: # This restriction might be lifted at some point msg = (_("Attempt to plug Logical Services Node %(lsn)s into " "network with port %(port)s failed. PatchAttachment " "already exists with another port") % {'lsn': lsn_id, 'port': lswitch_port_id}) LOG.exception(msg) raise nsx_exc.LsnConfigurationConflict(lsn_id=lsn_id) def _lsn_configure_action( cluster, lsn_id, action, is_enabled, obj): lsn_obj = {"enabled": is_enabled} lsn_obj.update(obj) nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, resource_id=lsn_id, extra_action=action), jsonutils.dumps(lsn_obj, sort_keys=True), cluster=cluster) def _lsn_port_configure_action( cluster, lsn_id, lsn_port_id, action, is_enabled, obj): nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, resource_id=lsn_id, extra_action=action), jsonutils.dumps({"enabled": is_enabled}, sort_keys=True), cluster=cluster) nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id, resource_id=lsn_port_id, extra_action=action), jsonutils.dumps(obj, sort_keys=True), cluster=cluster) def _get_opts(name, value): return {"name": name, "value": str(value)} def lsn_port_dhcp_configure( cluster, lsn_id, lsn_port_id, is_enabled=True, dhcp_options=None): dhcp_options = dhcp_options or {} opts = [_get_opts(key, val) for key, val in six.iteritems(dhcp_options)] dhcp_obj = {'options': opts} _lsn_port_configure_action( cluster, 
lsn_id, lsn_port_id, 'dhcp', is_enabled, dhcp_obj) def lsn_metadata_configure( cluster, lsn_id, is_enabled=True, metadata_info=None): meta_obj = { 'metadata_server_ip': metadata_info['metadata_server_ip'], 'metadata_server_port': metadata_info['metadata_server_port'], } if metadata_info: opts = [ _get_opts(opt, metadata_info[opt]) for opt in SUPPORTED_METADATA_OPTIONS if metadata_info.get(opt) ] if opts: meta_obj["options"] = opts _lsn_configure_action( cluster, lsn_id, 'metadata-proxy', is_enabled, meta_obj) def _lsn_port_host_action( cluster, lsn_id, lsn_port_id, host_obj, extra_action, action): nsxlib.do_request(HTTP_POST, nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id, resource_id=lsn_port_id, extra_action=extra_action, filters={"action": action}), jsonutils.dumps(host_obj, sort_keys=True), cluster=cluster) def lsn_port_dhcp_host_add(cluster, lsn_id, lsn_port_id, host_data): _lsn_port_host_action( cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'add_host') def lsn_port_dhcp_host_remove(cluster, lsn_id, lsn_port_id, host_data): _lsn_port_host_action( cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'remove_host') def lsn_port_metadata_host_add(cluster, lsn_id, lsn_port_id, host_data): _lsn_port_host_action( cluster, lsn_id, lsn_port_id, host_data, 'metadata-proxy', 'add_host') def lsn_port_metadata_host_remove(cluster, lsn_id, lsn_port_id, host_data): _lsn_port_host_action(cluster, lsn_id, lsn_port_id, host_data, 'metadata-proxy', 'remove_host') vmware-nsx-12.0.1/vmware_nsx/nsxlib/mh/__init__.py0000666000175100017510000001206113244523345022142 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron import version from neutron_lib import exceptions as exception from oslo_log import log from oslo_serialization import jsonutils import six from vmware_nsx._i18n import _ from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as nsx_exc HTTP_GET = "GET" HTTP_POST = "POST" HTTP_DELETE = "DELETE" HTTP_PUT = "PUT" # Prefix to be used for all NSX API calls URI_PREFIX = "/ws.v1" NEUTRON_VERSION = version.version_info.release_string() LOG = log.getLogger(__name__) def _build_uri_path(resource, resource_id=None, parent_resource_id=None, fields=None, relations=None, filters=None, types=None, is_attachment=False, extra_action=None): resources = resource.split('/') res_path = resources[0] if resource_id: res_path += "/%s" % resource_id if len(resources) > 1: # There is also a parent resource to account for in the uri res_path = "%s/%s/%s" % (resources[1], parent_resource_id, res_path) if is_attachment: res_path = "%s/attachment" % res_path elif extra_action: res_path = "%s/%s" % (res_path, extra_action) params = [] params.append(fields and "fields=%s" % fields) params.append(relations and "relations=%s" % relations) params.append(types and "types=%s" % types) if filters: sorted_filters = [ '%s=%s' % (k, filters[k]) for k in sorted(filters.keys()) ] params.extend(sorted_filters) uri_path = "%s/%s" % (URI_PREFIX, res_path) non_empty_params = [x for x in params if x is not None] if non_empty_params: query_string = '&'.join(non_empty_params) if query_string: uri_path += "?%s" % query_string return uri_path def 
format_exception(etype, e, exception_locals): """Consistent formatting for exceptions. :param etype: a string describing the exception type. :param e: the exception. :param execption_locals: calling context local variable dict. :returns: a formatted string. """ msg = [_("Error. %(type)s exception: %(exc)s.") % {'type': etype, 'exc': e}] l = dict((k, v) for k, v in six.iteritems(exception_locals) if k != 'request') msg.append(_("locals=[%s]") % str(l)) return ' '.join(msg) def do_request(*args, **kwargs): """Issue a request to the cluster specified in kwargs. :param args: a list of positional arguments. :param kwargs: a list of keyworkds arguments. :returns: the result of the operation loaded into a python object or None. """ cluster = kwargs["cluster"] try: res = cluster.api_client.request(*args) if res: return jsonutils.loads(res) except api_exc.ResourceNotFound: raise exception.NotFound() except api_exc.ReadOnlyMode: raise nsx_exc.MaintenanceInProgress() def get_single_query_page(path, cluster, page_cursor=None, page_length=1000, neutron_only=True): params = [] if page_cursor: params.append("_page_cursor=%s" % page_cursor) params.append("_page_length=%s" % page_length) # NOTE(salv-orlando): On the NSX backend the 'Quantum' tag is still # used for marking Neutron entities in order to preserve compatibility if neutron_only: params.append("tag_scope=quantum") query_params = "&".join(params) path = "%s%s%s" % (path, "&" if (path.find("?") != -1) else "?", query_params) body = do_request(HTTP_GET, path, cluster=cluster) # Result_count won't be returned if _page_cursor is supplied return body['results'], body.get('page_cursor'), body.get('result_count') def get_all_query_pages(path, cluster): need_more_results = True result_list = [] page_cursor = None while need_more_results: results, page_cursor = get_single_query_page( path, cluster, page_cursor)[:2] if not page_cursor: need_more_results = False result_list.extend(results) return result_list def mk_body(**kwargs): 
"""Convenience function creates and dumps dictionary to string. :param kwargs: the key/value pirs to be dumped into a json string. :returns: a json string. """ return jsonutils.dumps(kwargs, ensure_ascii=False) vmware-nsx-12.0.1/vmware_nsx/nsxlib/mh/switch.py0000666000175100017510000004021413244523345021705 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib import constants from neutron_lib import exceptions as exception from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from vmware_nsx._i18n import _ from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils from vmware_nsx.nsxlib import mh as nsxlib HTTP_GET = "GET" HTTP_POST = "POST" HTTP_DELETE = "DELETE" HTTP_PUT = "PUT" LSWITCH_RESOURCE = "lswitch" LSWITCHPORT_RESOURCE = "lport/%s" % LSWITCH_RESOURCE LOG = log.getLogger(__name__) def _configure_extensions(lport_obj, mac_address, fixed_ips, port_security_enabled, security_profiles, queue_id, mac_learning_enabled, allowed_address_pairs): lport_obj['allowed_address_pairs'] = [] if port_security_enabled: for fixed_ip in fixed_ips: ip_address = fixed_ip.get('ip_address') if ip_address: lport_obj['allowed_address_pairs'].append( {'mac_address': mac_address, 'ip_address': ip_address}) # add address pair allowing src_ip 0.0.0.0 to leave # this is required for 
outgoing dhcp request lport_obj["allowed_address_pairs"].append( {"mac_address": mac_address, "ip_address": "0.0.0.0"}) lport_obj['security_profiles'] = list(security_profiles or []) lport_obj['queue_uuid'] = queue_id if mac_learning_enabled is not None: lport_obj["mac_learning"] = mac_learning_enabled lport_obj["type"] = "LogicalSwitchPortConfig" for address_pair in list(allowed_address_pairs or []): lport_obj['allowed_address_pairs'].append( {'mac_address': address_pair['mac_address'], 'ip_address': address_pair['ip_address']}) def get_lswitch_by_id(cluster, lswitch_id): try: lswitch_uri_path = nsxlib._build_uri_path( LSWITCH_RESOURCE, lswitch_id, relations="LogicalSwitchStatus") return nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster) except exception.NotFound: # FIXME(salv-orlando): this should not raise a neutron exception raise exception.NetworkNotFound(net_id=lswitch_id) def get_lswitches(cluster, neutron_net_id): def lookup_switches_by_tag(): # Fetch extra logical switches lswitch_query_path = nsxlib._build_uri_path( LSWITCH_RESOURCE, fields="uuid,display_name,tags,lport_count", relations="LogicalSwitchStatus", filters={'tag': neutron_net_id, 'tag_scope': 'quantum_net_id'}) return nsxlib.get_all_query_pages(lswitch_query_path, cluster) lswitch_uri_path = nsxlib._build_uri_path(LSWITCH_RESOURCE, neutron_net_id, relations="LogicalSwitchStatus") results = [] try: ls = nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster) results.append(ls) for tag in ls['tags']: if (tag['scope'] == "multi_lswitch" and tag['tag'] == "True"): results.extend(lookup_switches_by_tag()) except exception.NotFound: # This is legit if the neutron network was created using # a post-Havana version of the plugin results.extend(lookup_switches_by_tag()) if results: return results else: raise exception.NetworkNotFound(net_id=neutron_net_id) def create_lswitch(cluster, neutron_net_id, tenant_id, display_name, transport_zones_config, shared=None, **kwargs): # The tag 
scope adopts a slightly different naming convention for # historical reasons lswitch_obj = {"display_name": utils.check_and_truncate(display_name), "transport_zones": transport_zones_config, "replication_mode": cfg.CONF.NSX.replication_mode, "tags": utils.get_tags(os_tid=tenant_id, quantum_net_id=neutron_net_id)} # TODO(salv-orlando): Now that we have async status synchronization # this tag is perhaps not needed anymore if shared: lswitch_obj["tags"].append({"tag": "true", "scope": "shared"}) if "tags" in kwargs: lswitch_obj["tags"].extend(kwargs["tags"]) uri = nsxlib._build_uri_path(LSWITCH_RESOURCE) lswitch = nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(lswitch_obj), cluster=cluster) LOG.debug("Created logical switch: %s", lswitch['uuid']) return lswitch def update_lswitch(cluster, lswitch_id, display_name, tenant_id=None, **kwargs): uri = nsxlib._build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id) lswitch_obj = {"display_name": utils.check_and_truncate(display_name)} # NOTE: tag update will not 'merge' existing tags with new ones. 
tags = [] if tenant_id: tags = utils.get_tags(os_tid=tenant_id) # The 'tags' kwarg might existing and be None tags.extend(kwargs.get('tags') or []) if tags: lswitch_obj['tags'] = tags try: return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(lswitch_obj), cluster=cluster) except exception.NotFound as e: LOG.error("Network not found, Error: %s", str(e)) raise exception.NetworkNotFound(net_id=lswitch_id) def delete_network(cluster, net_id, lswitch_id): delete_networks(cluster, net_id, [lswitch_id]) #TODO(salvatore-orlando): Simplify and harmonize def delete_networks(cluster, net_id, lswitch_ids): for ls_id in lswitch_ids: path = "/ws.v1/lswitch/%s" % ls_id try: nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) except exception.NotFound as e: LOG.error("Network not found, Error: %s", str(e)) raise exception.NetworkNotFound(net_id=ls_id) def query_lswitch_lports(cluster, ls_uuid, fields="*", filters=None, relations=None): # Fix filter for attachments if filters and "attachment" in filters: filters['attachment_vif_uuid'] = filters["attachment"] del filters['attachment'] uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, parent_resource_id=ls_uuid, fields=fields, filters=filters, relations=relations) return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results'] def delete_port(cluster, switch, port): uri = "/ws.v1/lswitch/" + switch + "/lport/" + port try: nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster) except exception.NotFound as e: LOG.error("Port or Network not found, Error: %s", str(e)) raise exception.PortNotFoundOnNetwork( net_id=switch, port_id=port) except api_exc.NsxApiException: raise exception.NeutronException() def get_ports(cluster, networks=None, devices=None, tenants=None): vm_filter_obsolete = "" vm_filter = "" tenant_filter = "" # This is used when calling delete_network. Neutron checks to see if # the network has any ports. 
if networks: # FIXME (Aaron) If we get more than one network_id this won't work lswitch = networks[0] else: lswitch = "*" if devices: for device_id in devices: vm_filter_obsolete = '&'.join( ["tag_scope=vm_id", "tag=%s" % utils.device_id_to_vm_id(device_id, obfuscate=True), vm_filter_obsolete]) vm_filter = '&'.join( ["tag_scope=vm_id", "tag=%s" % utils.device_id_to_vm_id(device_id), vm_filter]) if tenants: for tenant in tenants: tenant_filter = '&'.join( ["tag_scope=os_tid", "tag=%s" % tenant, tenant_filter]) nsx_lports = {} lport_fields_str = ("tags,admin_status_enabled,display_name," "fabric_status_up") try: lport_query_path_obsolete = ( "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id" "&relations=LogicalPortStatus" % (lswitch, lport_fields_str, vm_filter_obsolete, tenant_filter)) lport_query_path = ( "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id" "&relations=LogicalPortStatus" % (lswitch, lport_fields_str, vm_filter, tenant_filter)) try: # NOTE(armando-migliaccio): by querying with obsolete tag first # current deployments won't take the performance hit of a double # call. In release L-** or M-**, we might want to swap the calls # as it's likely that ports with the new tag would outnumber the # ones with the old tag ports = nsxlib.get_all_query_pages(lport_query_path_obsolete, cluster) if not ports: ports = nsxlib.get_all_query_pages(lport_query_path, cluster) except exception.NotFound: LOG.warning("Lswitch %s not found in NSX", lswitch) ports = None if ports: for port in ports: for tag in port["tags"]: if tag["scope"] == "q_port_id": nsx_lports[tag["tag"]] = port except Exception: err_msg = _("Unable to get ports") LOG.exception(err_msg) raise nsx_exc.NsxPluginException(err_msg=err_msg) return nsx_lports def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id): """Get port by neutron tag. Returns the NSX UUID of the logical port with tag q_port_id equal to neutron_port_id or None if the port is not Found. 
""" uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, parent_resource_id=lswitch_uuid, fields='uuid', filters={'tag': neutron_port_id, 'tag_scope': 'q_port_id'}) LOG.debug("Looking for port with q_port_id tag '%(neutron_port_id)s' " "on: '%(lswitch_uuid)s'", {'neutron_port_id': neutron_port_id, 'lswitch_uuid': lswitch_uuid}) res = nsxlib.do_request(HTTP_GET, uri, cluster=cluster) num_results = len(res["results"]) if num_results >= 1: if num_results > 1: LOG.warning("Found '%(num_ports)d' ports with " "q_port_id tag: '%(neutron_port_id)s'. " "Only 1 was expected.", {'num_ports': num_results, 'neutron_port_id': neutron_port_id}) return res["results"][0] def get_port(cluster, network, port, relations=None): LOG.info("get_port() %(network)s %(port)s", {'network': network, 'port': port}) uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?" if relations: uri += "relations=%s" % relations try: return nsxlib.do_request(HTTP_GET, uri, cluster=cluster) except exception.NotFound as e: LOG.error("Port or Network not found, Error: %s", str(e)) raise exception.PortNotFoundOnNetwork( port_id=port, net_id=network) def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id, display_name, device_id, admin_status_enabled, mac_address=None, fixed_ips=None, port_security_enabled=None, security_profiles=None, queue_id=None, mac_learning_enabled=None, allowed_address_pairs=None): lport_obj = dict( admin_status_enabled=admin_status_enabled, display_name=utils.check_and_truncate(display_name), tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id, vm_id=utils.device_id_to_vm_id(device_id))) _configure_extensions(lport_obj, mac_address, fixed_ips, port_security_enabled, security_profiles, queue_id, mac_learning_enabled, allowed_address_pairs) path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid try: result = nsxlib.do_request(HTTP_PUT, path, jsonutils.dumps(lport_obj), cluster=cluster) LOG.debug("Updated logical port %(result)s " "on logical 
switch %(uuid)s", {'result': result['uuid'], 'uuid': lswitch_uuid}) return result except exception.NotFound as e: LOG.error("Port or Network not found, Error: %s", str(e)) raise exception.PortNotFoundOnNetwork( port_id=lport_uuid, net_id=lswitch_uuid) def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id, display_name, device_id, admin_status_enabled, mac_address=None, fixed_ips=None, port_security_enabled=None, security_profiles=None, queue_id=None, mac_learning_enabled=None, allowed_address_pairs=None): """Creates a logical port on the assigned logical switch.""" display_name = utils.check_and_truncate(display_name) lport_obj = dict( admin_status_enabled=admin_status_enabled, display_name=display_name, tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id, vm_id=utils.device_id_to_vm_id(device_id)) ) _configure_extensions(lport_obj, mac_address, fixed_ips, port_security_enabled, security_profiles, queue_id, mac_learning_enabled, allowed_address_pairs) path = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, parent_resource_id=lswitch_uuid) result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj), cluster=cluster) LOG.debug("Created logical port %(result)s on logical switch %(uuid)s", {'result': result['uuid'], 'uuid': lswitch_uuid}) return result def get_port_status(cluster, lswitch_id, port_id): """Retrieve the operational status of the port.""" try: r = nsxlib.do_request(HTTP_GET, "/ws.v1/lswitch/%s/lport/%s/status" % (lswitch_id, port_id), cluster=cluster) except exception.NotFound as e: LOG.error("Port not found, Error: %s", str(e)) raise exception.PortNotFoundOnNetwork( port_id=port_id, net_id=lswitch_id) if r['link_status_up'] is True: return constants.PORT_STATUS_ACTIVE else: return constants.PORT_STATUS_DOWN def plug_interface(cluster, lswitch_id, lport_id, att_obj): return nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, lport_id, lswitch_id, is_attachment=True), jsonutils.dumps(att_obj), 
cluster=cluster) def plug_vif_interface( cluster, lswitch_id, port_id, port_type, attachment=None): """Plug a VIF Attachment object in a logical port.""" lport_obj = {} if attachment: lport_obj["vif_uuid"] = attachment lport_obj["type"] = port_type return plug_interface(cluster, lswitch_id, port_id, lport_obj) vmware-nsx-12.0.1/vmware_nsx/nsxlib/mh/router.py0000666000175100017510000007045413244523345021735 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as exception from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils import six from vmware_nsx._i18n import _ from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils from vmware_nsx.nsxlib import mh as nsxlib from vmware_nsx.nsxlib.mh import switch from vmware_nsx.nsxlib.mh import versioning # @versioning.versioned decorator makes the apparent function body # totally unrelated to the real function. 
This confuses pylint :( # pylint: disable=assignment-from-no-return HTTP_GET = "GET" HTTP_POST = "POST" HTTP_DELETE = "DELETE" HTTP_PUT = "PUT" LROUTER_RESOURCE = "lrouter" LROUTER_RESOURCE = "lrouter" LROUTERPORT_RESOURCE = "lport/%s" % LROUTER_RESOURCE LROUTERRIB_RESOURCE = "rib/%s" % LROUTER_RESOURCE LROUTERNAT_RESOURCE = "nat/lrouter" # Constants for NAT rules MATCH_KEYS = ["destination_ip_addresses", "destination_port_max", "destination_port_min", "source_ip_addresses", "source_port_max", "source_port_min", "protocol"] LOG = log.getLogger(__name__) def _prepare_lrouter_body(name, neutron_router_id, tenant_id, router_type, distributed=None, **kwargs): body = { "display_name": utils.check_and_truncate(name), "tags": utils.get_tags(os_tid=tenant_id, q_router_id=neutron_router_id), "routing_config": { "type": router_type }, "type": "LogicalRouterConfig", "replication_mode": cfg.CONF.NSX.replication_mode, } # add the distributed key only if not None (ie: True or False) if distributed is not None: body['distributed'] = distributed if kwargs: body["routing_config"].update(kwargs) return body def _create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id, display_name, nexthop, distributed=None): implicit_routing_config = { "default_route_next_hop": { "gateway_ip_address": nexthop, "type": "RouterNextHop" }, } lrouter_obj = _prepare_lrouter_body( display_name, neutron_router_id, tenant_id, "SingleDefaultRouteImplicitRoutingConfig", distributed=distributed, **implicit_routing_config) return nsxlib.do_request(HTTP_POST, nsxlib._build_uri_path(LROUTER_RESOURCE), jsonutils.dumps(lrouter_obj), cluster=cluster) def create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id, display_name, nexthop): """Create a NSX logical router on the specified cluster. 
:param cluster: The target NSX cluster :param tenant_id: Identifier of the Openstack tenant for which the logical router is being created :param display_name: Descriptive name of this logical router :param nexthop: External gateway IP address for the logical router :raise NsxApiException: if there is a problem while communicating with the NSX controller """ return _create_implicit_routing_lrouter( cluster, neutron_router_id, tenant_id, display_name, nexthop) def create_implicit_routing_lrouter_with_distribution( cluster, neutron_router_id, tenant_id, display_name, nexthop, distributed=None): """Create a NSX logical router on the specified cluster. This function also allows for creating distributed lrouters :param cluster: The target NSX cluster :param tenant_id: Identifier of the Openstack tenant for which the logical router is being created :param display_name: Descriptive name of this logical router :param nexthop: External gateway IP address for the logical router :param distributed: True for distributed logical routers :raise NsxApiException: if there is a problem while communicating with the NSX controller """ return _create_implicit_routing_lrouter( cluster, neutron_router_id, tenant_id, display_name, nexthop, distributed) def create_explicit_routing_lrouter(cluster, neutron_router_id, tenant_id, display_name, nexthop, distributed=None): lrouter_obj = _prepare_lrouter_body( display_name, neutron_router_id, tenant_id, "RoutingTableRoutingConfig", distributed=distributed) router = nsxlib.do_request(HTTP_POST, nsxlib._build_uri_path(LROUTER_RESOURCE), jsonutils.dumps(lrouter_obj), cluster=cluster) default_gw = {'prefix': '0.0.0.0/0', 'next_hop_ip': nexthop} create_explicit_route_lrouter(cluster, router['uuid'], default_gw) return router def delete_lrouter(cluster, lrouter_id): nsxlib.do_request(HTTP_DELETE, nsxlib._build_uri_path(LROUTER_RESOURCE, resource_id=lrouter_id), cluster=cluster) def get_lrouter(cluster, lrouter_id): return nsxlib.do_request(HTTP_GET, 
nsxlib._build_uri_path( LROUTER_RESOURCE, resource_id=lrouter_id, relations='LogicalRouterStatus'), cluster=cluster) def query_lrouters(cluster, fields=None, filters=None): return nsxlib.get_all_query_pages( nsxlib._build_uri_path(LROUTER_RESOURCE, fields=fields, relations='LogicalRouterStatus', filters=filters), cluster) def get_lrouters(cluster, tenant_id, fields=None, filters=None): # FIXME(salv-orlando): Fields parameter is ignored in this routine actual_filters = {} if filters: actual_filters.update(filters) if tenant_id: actual_filters['tag'] = tenant_id actual_filters['tag_scope'] = 'os_tid' lrouter_fields = "uuid,display_name,fabric_status,tags" return query_lrouters(cluster, lrouter_fields, actual_filters) def update_implicit_routing_lrouter(cluster, r_id, display_name, nexthop): lrouter_obj = get_lrouter(cluster, r_id) if not display_name and not nexthop: # Nothing to update return lrouter_obj # It seems that this is faster than the doing an if on display_name lrouter_obj["display_name"] = (utils.check_and_truncate(display_name) or lrouter_obj["display_name"]) if nexthop: nh_element = lrouter_obj["routing_config"].get( "default_route_next_hop") if nh_element: nh_element["gateway_ip_address"] = nexthop return nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(LROUTER_RESOURCE, resource_id=r_id), jsonutils.dumps(lrouter_obj), cluster=cluster) def get_explicit_routes_lrouter(cluster, router_id, protocol_type='static'): static_filter = {'protocol': protocol_type} existing_routes = nsxlib.do_request( HTTP_GET, nsxlib._build_uri_path(LROUTERRIB_RESOURCE, filters=static_filter, fields="*", parent_resource_id=router_id), cluster=cluster)['results'] return existing_routes def delete_explicit_route_lrouter(cluster, router_id, route_id): nsxlib.do_request(HTTP_DELETE, nsxlib._build_uri_path(LROUTERRIB_RESOURCE, resource_id=route_id, parent_resource_id=router_id), cluster=cluster) def create_explicit_route_lrouter(cluster, router_id, route): next_hop_ip = 
route.get("nexthop") or route.get("next_hop_ip") prefix = route.get("destination") or route.get("prefix") uuid = nsxlib.do_request( HTTP_POST, nsxlib._build_uri_path(LROUTERRIB_RESOURCE, parent_resource_id=router_id), jsonutils.dumps({ "action": "accept", "next_hop_ip": next_hop_ip, "prefix": prefix, "protocol": "static" }), cluster=cluster)['uuid'] return uuid def update_explicit_routes_lrouter(cluster, router_id, routes): # Update in bulk: delete them all, and add the ones specified # but keep track of what is been modified to allow roll-backs # in case of failures nsx_routes = get_explicit_routes_lrouter(cluster, router_id) try: deleted_routes = [] added_routes = [] # omit the default route (0.0.0.0/0) from the processing; # this must be handled through the nexthop for the router for route in nsx_routes: prefix = route.get("destination") or route.get("prefix") if prefix != '0.0.0.0/0': delete_explicit_route_lrouter(cluster, router_id, route['uuid']) deleted_routes.append(route) for route in routes: prefix = route.get("destination") or route.get("prefix") if prefix != '0.0.0.0/0': uuid = create_explicit_route_lrouter(cluster, router_id, route) added_routes.append(uuid) except api_exc.NsxApiException: LOG.exception('Cannot update NSX routes %(routes)s for ' 'router %(router_id)s', {'routes': routes, 'router_id': router_id}) # Roll back to keep NSX in consistent state with excutils.save_and_reraise_exception(): if nsx_routes: if deleted_routes: for route in deleted_routes: create_explicit_route_lrouter(cluster, router_id, route) if added_routes: for route_id in added_routes: delete_explicit_route_lrouter(cluster, router_id, route_id) return nsx_routes def get_default_route_explicit_routing_lrouter_v33(cluster, router_id): static_filter = {"protocol": "static", "prefix": "0.0.0.0/0"} default_route = nsxlib.do_request( HTTP_GET, nsxlib._build_uri_path(LROUTERRIB_RESOURCE, filters=static_filter, fields="*", parent_resource_id=router_id), cluster=cluster)["results"][0] 
return default_route def get_default_route_explicit_routing_lrouter_v32(cluster, router_id): # Scan all routes because 3.2 does not support query by prefix all_routes = get_explicit_routes_lrouter(cluster, router_id) for route in all_routes: if route['prefix'] == '0.0.0.0/0': return route def update_default_gw_explicit_routing_lrouter(cluster, router_id, next_hop): default_route = get_default_route_explicit_routing_lrouter(cluster, router_id) if next_hop != default_route["next_hop_ip"]: new_default_route = {"action": "accept", "next_hop_ip": next_hop, "prefix": "0.0.0.0/0", "protocol": "static"} nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path( LROUTERRIB_RESOURCE, resource_id=default_route['uuid'], parent_resource_id=router_id), jsonutils.dumps(new_default_route), cluster=cluster) def update_explicit_routing_lrouter(cluster, router_id, display_name, next_hop, routes=None): update_implicit_routing_lrouter(cluster, router_id, display_name, next_hop) if next_hop: update_default_gw_explicit_routing_lrouter(cluster, router_id, next_hop) if routes is not None: return update_explicit_routes_lrouter(cluster, router_id, routes) def query_lrouter_lports(cluster, lr_uuid, fields="*", filters=None, relations=None): uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, parent_resource_id=lr_uuid, fields=fields, filters=filters, relations=relations) return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results'] def create_router_lport(cluster, lrouter_uuid, tenant_id, neutron_port_id, display_name, admin_status_enabled, ip_addresses, mac_address=None): """Creates a logical port on the assigned logical router.""" lport_obj = dict( admin_status_enabled=admin_status_enabled, display_name=display_name, tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id), ip_addresses=ip_addresses, type="LogicalRouterPortConfig" ) # Only add the mac_address to lport_obj if present. This is because # when creating the fake_ext_gw there is no mac_address present. 
if mac_address: lport_obj['mac_address'] = mac_address path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, parent_resource_id=lrouter_uuid) result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj), cluster=cluster) LOG.debug("Created logical port %(lport_uuid)s on " "logical router %(lrouter_uuid)s", {'lport_uuid': result['uuid'], 'lrouter_uuid': lrouter_uuid}) return result def update_router_lport(cluster, lrouter_uuid, lrouter_port_uuid, tenant_id, neutron_port_id, display_name, admin_status_enabled, ip_addresses): """Updates a logical port on the assigned logical router.""" lport_obj = dict( admin_status_enabled=admin_status_enabled, display_name=display_name, tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id), ip_addresses=ip_addresses, type="LogicalRouterPortConfig" ) # Do not pass null items to NSX for key in lport_obj.keys(): if lport_obj[key] is None: del lport_obj[key] path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lrouter_port_uuid, parent_resource_id=lrouter_uuid) result = nsxlib.do_request(HTTP_PUT, path, jsonutils.dumps(lport_obj), cluster=cluster) LOG.debug("Updated logical port %(lport_uuid)s on " "logical router %(lrouter_uuid)s", {'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid}) return result def delete_router_lport(cluster, lrouter_uuid, lport_uuid): """Creates a logical port on the assigned logical router.""" path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_uuid, lrouter_uuid) nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) LOG.debug("Delete logical router port %(lport_uuid)s on " "logical router %(lrouter_uuid)s", {'lport_uuid': lport_uuid, 'lrouter_uuid': lrouter_uuid}) def delete_peer_router_lport(cluster, lr_uuid, ls_uuid, lp_uuid): nsx_port = switch.get_port(cluster, ls_uuid, lp_uuid, relations="LogicalPortAttachment") relations = nsx_port.get('_relations') if relations: att_data = relations.get('LogicalPortAttachment') if att_data: lrp_uuid = att_data.get('peer_port_uuid') if 
lrp_uuid: delete_router_lport(cluster, lr_uuid, lrp_uuid) def find_router_gw_port(context, cluster, router_id): """Retrieves the external gateway port for a NSX logical router.""" # Find the uuid of nsx ext gw logical router port # TODO(salvatore-orlando): Consider storing it in Neutron DB results = query_lrouter_lports( cluster, router_id, relations="LogicalPortAttachment") for lport in results: if '_relations' in lport: attachment = lport['_relations'].get('LogicalPortAttachment') if attachment and attachment.get('type') == 'L3GatewayAttachment': return lport def plug_router_port_attachment(cluster, router_id, port_id, attachment_uuid, nsx_attachment_type, attachment_vlan=None): """Attach a router port to the given attachment. Current attachment types: - PatchAttachment [-> logical switch port uuid] - L3GatewayAttachment [-> L3GatewayService uuid] For the latter attachment type a VLAN ID can be specified as well. """ uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, port_id, router_id, is_attachment=True) attach_obj = {} attach_obj["type"] = nsx_attachment_type if nsx_attachment_type == "PatchAttachment": attach_obj["peer_port_uuid"] = attachment_uuid elif nsx_attachment_type == "L3GatewayAttachment": attach_obj["l3_gateway_service_uuid"] = attachment_uuid if attachment_vlan: attach_obj['vlan_id'] = attachment_vlan else: raise nsx_exc.InvalidAttachmentType( attachment_type=nsx_attachment_type) return nsxlib.do_request( HTTP_PUT, uri, jsonutils.dumps(attach_obj), cluster=cluster) def _create_nat_match_obj(**kwargs): nat_match_obj = {'ethertype': 'IPv4'} delta = set(kwargs.keys()) - set(MATCH_KEYS) if delta: raise Exception(_("Invalid keys for NAT match: %s"), delta) nat_match_obj.update(kwargs) return nat_match_obj def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj): LOG.debug("Creating NAT rule: %s", nat_rule_obj) uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, parent_resource_id=router_id) return nsxlib.do_request(HTTP_POST, uri, 
jsonutils.dumps(nat_rule_obj), cluster=cluster) def _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj): return {"to_source_ip_address_min": min_src_ip, "to_source_ip_address_max": max_src_ip, "type": "SourceNatRule", "match": nat_match_obj} def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None): LOG.info("No SNAT rules cannot be applied as they are not available " "in this version of the NSX platform") def create_lrouter_nodnat_rule_v2(cluster, _router_id, _match_criteria=None): LOG.info("No DNAT rules cannot be applied as they are not available " "in this version of the NSX platform") def create_lrouter_snat_rule_v2(cluster, router_id, min_src_ip, max_src_ip, match_criteria=None): nat_match_obj = _create_nat_match_obj(**match_criteria) nat_rule_obj = _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj) return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) def create_lrouter_dnat_rule_v2(cluster, router_id, dst_ip, to_dst_port=None, match_criteria=None): nat_match_obj = _create_nat_match_obj(**match_criteria) nat_rule_obj = { "to_destination_ip_address_min": dst_ip, "to_destination_ip_address_max": dst_ip, "type": "DestinationNatRule", "match": nat_match_obj } if to_dst_port: nat_rule_obj['to_destination_port'] = to_dst_port return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) def create_lrouter_nosnat_rule_v3(cluster, router_id, order=None, match_criteria=None): nat_match_obj = _create_nat_match_obj(**match_criteria) nat_rule_obj = { "type": "NoSourceNatRule", "match": nat_match_obj } if order: nat_rule_obj['order'] = order return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) def create_lrouter_nodnat_rule_v3(cluster, router_id, order=None, match_criteria=None): nat_match_obj = _create_nat_match_obj(**match_criteria) nat_rule_obj = { "type": "NoDestinationNatRule", "match": nat_match_obj } if order: nat_rule_obj['order'] = order return _create_lrouter_nat_rule(cluster, router_id, 
nat_rule_obj) def create_lrouter_snat_rule_v3(cluster, router_id, min_src_ip, max_src_ip, order=None, match_criteria=None): nat_match_obj = _create_nat_match_obj(**match_criteria) nat_rule_obj = _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj) if order: nat_rule_obj['order'] = order return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) def create_lrouter_dnat_rule_v3(cluster, router_id, dst_ip, to_dst_port=None, order=None, match_criteria=None): nat_match_obj = _create_nat_match_obj(**match_criteria) nat_rule_obj = { "to_destination_ip_address": dst_ip, "type": "DestinationNatRule", "match": nat_match_obj } if to_dst_port: nat_rule_obj['to_destination_port'] = to_dst_port if order: nat_rule_obj['order'] = order return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) def delete_nat_rules_by_match(cluster, router_id, rule_type, max_num_expected, min_num_expected=0, raise_on_len_mismatch=True, **kwargs): # remove nat rules nat_rules = query_nat_rules(cluster, router_id) to_delete_ids = [] for r in nat_rules: if (r['type'] != rule_type): continue for key, value in six.iteritems(kwargs): if not (key in r['match'] and r['match'][key] == value): break else: to_delete_ids.append(r['uuid']) num_rules_to_delete = len(to_delete_ids) if (num_rules_to_delete < min_num_expected or num_rules_to_delete > max_num_expected): if raise_on_len_mismatch: raise nsx_exc.NatRuleMismatch(actual_rules=num_rules_to_delete, min_rules=min_num_expected, max_rules=max_num_expected) else: LOG.warning("Found %(actual_rule_num)d matching NAT rules, " "which is not in the expected range " "(%(min_exp_rule_num)d,%(max_exp_rule_num)d)", {'actual_rule_num': num_rules_to_delete, 'min_exp_rule_num': min_num_expected, 'max_exp_rule_num': max_num_expected}) for rule_id in to_delete_ids: delete_router_nat_rule(cluster, router_id, rule_id) # Return number of deleted rules - useful at least for # testing purposes return num_rules_to_delete def delete_router_nat_rule(cluster, 
                           router_id, rule_id):
    """Delete the NAT rule `rule_id` from the given logical router."""
    uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, rule_id, router_id)
    nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)


def query_nat_rules(cluster, router_id, fields="*", filters=None):
    """Return all NAT rules configured on the given logical router."""
    uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE,
                                 parent_resource_id=router_id,
                                 fields=fields, filters=filters)
    return nsxlib.get_all_query_pages(uri, cluster)


# NOTE(salvatore-orlando): The following FIXME applies in general to
# each operation on list attributes.
# FIXME(salvatore-orlando): need a lock around the list of IPs on an iface
def update_lrouter_port_ips(cluster, lrouter_id, lport_id,
                            ips_to_add, ips_to_remove):
    """Add and remove IP addresses on a logical router port.

    :raises nsx_exc.NsxPluginException: if the port is not found or the
        backend request fails.
    """
    uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_id, lrouter_id)
    try:
        port = nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
        # TODO(salvatore-orlando): Enforce ips_to_add intersection with
        # ips_to_remove is empty
        ip_address_set = set(port['ip_addresses'])
        ip_address_set = ip_address_set - set(ips_to_remove)
        ip_address_set = ip_address_set | set(ips_to_add)
        # Set is not JSON serializable - convert to list
        port['ip_addresses'] = list(ip_address_set)
        nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(port),
                          cluster=cluster)
    except exception.NotFound:
        # FIXME(salv-orlando):avoid raising different exception
        data = {'lport_id': lport_id, 'lrouter_id': lrouter_id}
        msg = (_("Router Port %(lport_id)s not found on router "
                 "%(lrouter_id)s") % data)
        LOG.exception(msg)
        raise nsx_exc.NsxPluginException(err_msg=msg)
    except api_exc.NsxApiException as e:
        msg = _("An exception occurred while updating IP addresses on a "
                "router logical port:%s") % e
        LOG.exception(msg)
        raise nsx_exc.NsxPluginException(err_msg=msg)


# Dispatch table consumed by @versioning.versioned: maps an operation name
# to {NSX major version: {minor version (or DEFAULT_VERSION): function}}.
ROUTER_FUNC_DICT = {
    'create_lrouter': {
        2: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter, },
        3: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter,
            1: create_implicit_routing_lrouter_with_distribution,
            2: create_explicit_routing_lrouter, },
    },
    'update_lrouter': {
        2: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter, },
        3: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter,
            2: update_explicit_routing_lrouter, },
    },
    'create_lrouter_dnat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v3, },
    },
    'create_lrouter_snat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v3, },
    },
    'create_lrouter_nosnat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v3, },
    },
    'create_lrouter_nodnat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v3, },
    },
    'get_default_route_explicit_routing_lrouter': {
        # NOTE(review): both DEFAULT_VERSION and minor 2 map to the _v32
        # variant even though a _v33 variant is defined above — confirm
        # this is intentional.
        3: {versioning.DEFAULT_VERSION:
            get_default_route_explicit_routing_lrouter_v32,
            2: get_default_route_explicit_routing_lrouter_v32, },
    },
}


@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter(cluster, *args, **kwargs):
    """Create a logical router (dispatched on NSX version)."""
    # Distributed routers require NSX >= 3.1; validate before dispatch.
    if kwargs.get('distributed', None):
        v = cluster.api_client.get_version()
        if (v.major, v.minor) < (3, 1):
            raise nsx_exc.InvalidVersion(version=v)
        return v


@versioning.versioned(ROUTER_FUNC_DICT)
def get_default_route_explicit_routing_lrouter(cluster, *args, **kwargs):
    """Fetch the default (0.0.0.0/0) route (dispatched on NSX version)."""
    pass


@versioning.versioned(ROUTER_FUNC_DICT)
def update_lrouter(cluster, *args, **kwargs):
    """Update a logical router (dispatched on NSX version)."""
    # Explicit routes require NSX >= 3.2; validate before dispatch.
    if kwargs.get('routes', None):
        v = cluster.api_client.get_version()
        if (v.major, v.minor) < (3, 2):
            raise nsx_exc.InvalidVersion(version=v)
        return v


@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_dnat_rule(cluster, *args, **kwargs):
    """Create a DNAT rule (dispatched on NSX version)."""
    pass


@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_snat_rule(cluster, *args, **kwargs):
    """Create a SNAT rule (dispatched on NSX version)."""
    pass


@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_nosnat_rule(cluster, *args, **kwargs):
    """Create a No-SNAT rule (dispatched on NSX version)."""
    pass


@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_nodnat_rule(cluster, *args, **kwargs):
    """Create a No-DNAT rule (dispatched on NSX version)."""
    pass
vmware-nsx-12.0.1/vmware_nsx/nsxlib/mh/secgroup.py0000666000175100017510000002125713244523345022241 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from neutron_lib import exceptions from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils from vmware_nsx.common import utils from vmware_nsx.nsxlib import mh as nsxlib HTTP_GET = "GET" HTTP_POST = "POST" HTTP_DELETE = "DELETE" HTTP_PUT = "PUT" SECPROF_RESOURCE = "security-profile" LOG = log.getLogger(__name__) def mk_body(**kwargs): """Convenience function creates and dumps dictionary to string. :param kwargs: the key/value pirs to be dumped into a json string. :returns: a json string. """ return jsonutils.dumps(kwargs, ensure_ascii=False) def query_security_profiles(cluster, fields=None, filters=None): return nsxlib.get_all_query_pages( nsxlib._build_uri_path(SECPROF_RESOURCE, fields=fields, filters=filters), cluster) def create_security_profile(cluster, tenant_id, neutron_id, security_profile): """Create a security profile on the NSX backend. :param cluster: a NSX cluster object reference :param tenant_id: identifier of the Neutron tenant :param neutron_id: neutron security group identifier :param security_profile: dictionary with data for configuring the NSX security profile. 
""" path = "/ws.v1/security-profile" # Allow all dhcp responses and all ingress traffic hidden_rules = {'logical_port_egress_rules': [{'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP, 'port_range_min': constants.DHCP_RESPONSE_PORT, 'port_range_max': constants.DHCP_RESPONSE_PORT, 'ip_prefix': '0.0.0.0/0'}], 'logical_port_ingress_rules': [{'ethertype': 'IPv4'}, {'ethertype': 'IPv6'}]} display_name = utils.check_and_truncate(security_profile.get('name')) # NOTE(salv-orlando): neutron-id tags are prepended with 'q' for # historical reasons body = mk_body( tags=utils.get_tags(os_tid=tenant_id, q_sec_group_id=neutron_id), display_name=display_name, logical_port_ingress_rules=( hidden_rules['logical_port_ingress_rules']), logical_port_egress_rules=hidden_rules['logical_port_egress_rules'] ) rsp = nsxlib.do_request(HTTP_POST, path, body, cluster=cluster) if security_profile.get('name') == 'default': # If security group is default allow ip traffic between # members of the same security profile is allowed and ingress traffic # from the switch rules = {'logical_port_egress_rules': [{'ethertype': 'IPv4', 'profile_uuid': rsp['uuid']}, {'ethertype': 'IPv6', 'profile_uuid': rsp['uuid']}], 'logical_port_ingress_rules': [{'ethertype': 'IPv4'}, {'ethertype': 'IPv6'}]} update_security_group_rules(cluster, rsp['uuid'], rules) LOG.debug("Created Security Profile: %s", rsp) return rsp def update_security_group_rules(cluster, spid, rules): path = "/ws.v1/security-profile/%s" % spid # Allow all dhcp responses in rules['logical_port_egress_rules'].append( {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP, 'port_range_min': constants.DHCP_RESPONSE_PORT, 'port_range_max': constants.DHCP_RESPONSE_PORT, 'ip_prefix': '0.0.0.0/0'}) # If there are no ingress rules add bunk rule to drop all ingress traffic if not rules['logical_port_ingress_rules']: rules['logical_port_ingress_rules'].append( {'ethertype': 'IPv4', 'ip_prefix': '127.0.0.1/32'}) try: body = mk_body( 
logical_port_ingress_rules=summarize_security_group_rules(rules[ 'logical_port_ingress_rules']), logical_port_egress_rules=summarize_security_group_rules(rules[ 'logical_port_egress_rules'])) rsp = nsxlib.do_request(HTTP_PUT, path, body, cluster=cluster) except exceptions.NotFound as e: LOG.error(nsxlib.format_exception("Unknown", e, locals())) #FIXME(salvatore-orlando): This should not raise NeutronException raise exceptions.NeutronException() LOG.debug("Updated Security Profile: %s", rsp) return rsp def update_security_profile(cluster, spid, name): return nsxlib.do_request( HTTP_PUT, nsxlib._build_uri_path(SECPROF_RESOURCE, resource_id=spid), jsonutils.dumps({"display_name": utils.check_and_truncate(name)}), cluster=cluster) def delete_security_profile(cluster, spid): path = "/ws.v1/security-profile/%s" % spid try: nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) except exceptions.NotFound: with excutils.save_and_reraise_exception(): # This is not necessarily an error condition LOG.warning("Unable to find security profile %s on NSX " "backend", spid) def summarize_security_group_rules(logical_port_rules): """ Summarizes security group rules and remove duplicates. Given a set of arbitrary security group rules, determining the optimum (minimum) rule set is a complex (NP-hard) problem. This method does not attempt to obtain the optimum rules. Instead, it summarizes a set of common rule patterns. """ # Remove port_range_min & port_range_max if it covers the entire port # range. Also, remove quad-zero default IPv4 and default IPv6 routes for rule in logical_port_rules: if ('port_range_min' in rule and 'port_range_max' in rule and rule['port_range_min'] <= 1 and rule['port_range_max'] == 65535): del rule['port_range_min'] del rule['port_range_max'] if ('ip_prefix' in rule and rule['ip_prefix'] in ['0.0.0.0/0', '::/0']): del rule['ip_prefix'] # Remove duplicate rules. Loop through each rule rule_i and exclude a # rule if it is part of another rule. 
logical_port_rules_summarized = [] for i in range(len(logical_port_rules)): for j in range(len(logical_port_rules)): if i != j: if is_sg_rules_identical(logical_port_rules[i], logical_port_rules[j]): pass elif is_sg_rule_subset(logical_port_rules[i], logical_port_rules[j]): break else: logical_port_rules_summarized.append(logical_port_rules[i]) return logical_port_rules_summarized def is_sg_rules_identical(sgr1, sgr2): """ determines if security group rule sgr1 and sgr2 are identical """ return (sgr1['ethertype'] == sgr2['ethertype'] and sgr1.get('protocol') == sgr2.get('protocol') and sgr1.get('port_range_min') == sgr2.get('port_range_min') and sgr1.get('port_range_max') == sgr2.get('port_range_max') and sgr1.get('ip_prefix') == sgr2.get('ip_prefix') and sgr1.get('profile_uuid') == sgr2.get('profile_uuid')) def is_sg_rule_subset(sgr1, sgr2): """ determine if security group rule sgr1 is a strict subset of sgr2 """ all_protocols = set(range(256)) sgr1_protocols = {sgr1['protocol']} if 'protocol' in sgr1 else \ all_protocols sgr2_protocols = {sgr2['protocol']} if 'protocol' in sgr2 else \ all_protocols return (sgr1['ethertype'] == sgr2['ethertype'] and sgr1_protocols.issubset(sgr2_protocols) and sgr1.get('port_range_min', 0) >= sgr2.get('port_range_min', 0) and sgr1.get('port_range_max', 65535) <= sgr2.get('port_range_max', 65535) and (sgr2.get('ip_prefix') is None or sgr1.get('ip_prefix') == sgr2.get('prefix')) and (sgr2.get('profile_uuid') is None or sgr1.get('profile_uuid') == sgr2.get('profile_uuid'))) vmware-nsx-12.0.1/vmware_nsx/nsxlib/mh/versioning.py0000666000175100017510000000516313244523345022573 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect from vmware_nsx._i18n import _ from vmware_nsx.api_client import exception DEFAULT_VERSION = -1 def versioned(func_table): def versioned_function(wrapped_func): func_name = wrapped_func.__name__ def dispatch_versioned_function(cluster, *args, **kwargs): # Call the wrapper function, in case we need to # run validation checks regarding versions. It # should return the NSX version v = (wrapped_func(cluster, *args, **kwargs) or cluster.api_client.get_version()) func = get_function_by_version(func_table, func_name, v) func_kwargs = kwargs arg_spec = inspect.getargspec(func) if not arg_spec.keywords and not arg_spec.varargs: # drop args unknown to function from func_args arg_set = set(func_kwargs.keys()) for arg in arg_set - set(arg_spec.args): del func_kwargs[arg] # NOTE(salvatore-orlando): shall we fail here if a required # argument is not passed, or let the called function raise? return func(cluster, *args, **func_kwargs) return dispatch_versioned_function return versioned_function def get_function_by_version(func_table, func_name, ver): if ver: if ver.major not in func_table[func_name]: major = max(func_table[func_name].keys()) minor = max(func_table[func_name][major].keys()) if major > ver.major: raise NotImplementedError(_("Operation may not be supported")) else: major = ver.major minor = ver.minor if ver.minor not in func_table[func_name][major]: minor = DEFAULT_VERSION return func_table[func_name][major][minor] else: msg = _('NSX version is not set. Unable to complete request ' 'correctly. 
Check log for NSX communication errors.') raise exception.ServiceUnavailable(message=msg) vmware-nsx-12.0.1/vmware_nsx/nsxlib/mh/queue.py0000666000175100017510000000501113244523345021524 0ustar zuulzuul00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import validators from neutron_lib import exceptions as exception from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils import six from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import utils from vmware_nsx.nsxlib import mh as nsxlib HTTP_POST = "POST" HTTP_DELETE = "DELETE" LQUEUE_RESOURCE = "lqueue" LOG = log.getLogger(__name__) def create_lqueue(cluster, queue_data): params = { 'name': 'display_name', 'qos_marking': 'qos_marking', 'min': 'min_bandwidth_rate', 'max': 'max_bandwidth_rate', 'dscp': 'dscp' } queue_obj = dict( (nsx_name, queue_data.get(api_name)) for api_name, nsx_name in six.iteritems(params) if validators.is_attr_set(queue_data.get(api_name)) ) if 'display_name' in queue_obj: queue_obj['display_name'] = utils.check_and_truncate( queue_obj['display_name']) queue_obj['tags'] = utils.get_tags() try: return nsxlib.do_request(HTTP_POST, nsxlib._build_uri_path(LQUEUE_RESOURCE), jsonutils.dumps(queue_obj), cluster=cluster)['uuid'] except api_exc.NsxApiException: # FIXME(salv-orlando): This should not raise NeutronException with excutils.save_and_reraise_exception(): 
raise exception.NeutronException() def delete_lqueue(cluster, queue_id): try: nsxlib.do_request(HTTP_DELETE, nsxlib._build_uri_path(LQUEUE_RESOURCE, resource_id=queue_id), cluster=cluster) except Exception: # FIXME(salv-orlando): This should not raise NeutronException with excutils.save_and_reraise_exception(): raise exception.NeutronException() vmware-nsx-12.0.1/MANIFEST.in0000666000175100017510000000047413244523345015500 0ustar zuulzuul00000000000000include AUTHORS include README.rst include ChangeLog include LICENSE include vmware_nsx/db/migration/alembic_migrations/script.py.mako recursive-include vmware_nsx/db/migration/alembic_migrations/versions * recursive-include vmware_nsx/neutron/locale * exclude .gitignore exclude .gitreview global-exclude *.pyc vmware-nsx-12.0.1/tools/0000775000175100017510000000000013244524600015066 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/tools/tox_install_project.sh0000777000175100017510000000326013244523345021523 0ustar zuulzuul00000000000000#!/bin/sh # Many of neutron's repos suffer from the problem of depending on neutron, # but it not existing on pypi. # This wrapper for tox's package installer will use the existing package # if it exists, else use zuul-cloner if that program exists, else grab it # from neutron master via a hard-coded URL. That last case should only # happen with devs running unit tests locally. # From the tox.ini config page: # install_command=ARGV # default: # pip install {opts} {packages} PROJ=$1 MOD=$2 shift 2 ZUUL_CLONER=/usr/zuul-env/bin/zuul-cloner neutron_installed=$(echo "import ${MOD}" | python 2>/dev/null ; echo $?) 
BRANCH_NAME=master PROJ_DIR=${HOME}/src/git.openstack.org/openstack/${PROJ} set -e CONSTRAINTS_FILE=$1 shift install_cmd="pip install" if [ $CONSTRAINTS_FILE != "unconstrained" ]; then install_cmd="$install_cmd -c$CONSTRAINTS_FILE" fi if [ -d "$PROJ_DIR" ]; then echo "FOUND code at $PROJ_DIR - using" $install_cmd -U -e ${PROJ_DIR} elif [ $neutron_installed -eq 0 ]; then echo "ALREADY INSTALLED" > /tmp/tox_install-${PROJ}.txt echo "${PROJ} already installed; using existing package" elif [ -x "$ZUUL_CLONER" ]; then echo "${PROJ} not installed; using zuul cloner" echo "ZUUL CLONER" > /tmp/tox_install-${PROJ}.txt cwd=$(/bin/pwd) cd /tmp $ZUUL_CLONER --cache-dir \ /opt/git \ --branch ${BRANCH_NAME} \ git://git.openstack.org \ openstack/${PROJ} cd openstack/${PROJ} $install_cmd -e . cd "$cwd" else echo "${PROJ} not installed; using egg" echo "PIP HARDCODE" > /tmp/tox_install-${PROJ}.txt $install_cmd -U -egit+https://git.openstack.org/openstack/${PROJ}@${BRANCH_NAME}#egg=${PROJ} fi vmware-nsx-12.0.1/tools/misc-sanity-checks.sh0000777000175100017510000000461013244523345021133 0ustar zuulzuul00000000000000#! /bin/sh # Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
TMPDIR=`mktemp -d /tmp/${0##*/}.XXXXXX` || exit 1 export TMPDIR trap "rm -rf $TMPDIR" EXIT FAILURES=$TMPDIR/failures check_no_symlinks_allowed () { # Symlinks break the package build process, so ensure that they # do not slip in, except hidden symlinks. if [ $(find . -type l ! -path '*/.*' | wc -l) -ge 1 ]; then echo "Symlinks are not allowed!" >>$FAILURES fi } check_pot_files_errors () { # The job vmware-nsx-propose-translation-update does not update from # transifex since our po files contain duplicate entries where # obsolete entries duplicate normal entries. Prevent obsolete # entries to slip in if [ $(find vmware_nsx -type f -regex '.*\.pot?' | wc -l) -ge 1 ]; then find vmware_nsx -type f -regex '.*\.pot?' \ -print0|xargs -0 -n 1 msgfmt --check-format \ -o /dev/null if [ "$?" -ne 0 ]; then echo "PO files syntax is not correct!" >>$FAILURES fi fi } check_identical_policy_files () { # For unit tests, we maintain their own policy.json file to make test suite # independent of whether it's executed from the vmware-nsx source tree or from # site-packages installation path. We don't want two copies of the same # file to diverge, so checking that they are identical diff etc/policy.json vmware-nsx/tests/etc/policy.json 2>&1 > /dev/null if [ "$?" -ne 0 ]; then echo "policy.json files must be identical!" >>$FAILURES fi } # Add your checks here... 
check_no_symlinks_allowed check_pot_files_errors #check_identical_policy_files # Fail, if there are emitted failures if [ -f $FAILURES ]; then cat $FAILURES exit 1 fi vmware-nsx-12.0.1/tools/ostestr_compat_shim.sh0000777000175100017510000000025113244523345021520 0ustar zuulzuul00000000000000#!/bin/sh # preserve old behavior of using an arg as a regex when '--' is not present case $@ in (*--*) ostestr $@;; ('') ostestr;; (*) ostestr --regex "$@" esac vmware-nsx-12.0.1/tools/generate_config_file_samples.sh0000777000175100017510000000144013244523345023275 0ustar zuulzuul00000000000000#!/bin/sh # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -e GEN_CMD=oslo-config-generator if ! type "$GEN_CMD" > /dev/null; then echo "ERROR: $GEN_CMD not installed on the system." exit 1 fi for file in `ls etc/oslo-config-generator/*`; do $GEN_CMD --config-file=$file done set -x vmware-nsx-12.0.1/tools/with_venv.sh0000777000175100017510000000152513244523345017450 0ustar zuulzuul00000000000000#!/bin/bash # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. tools_path=${tools_path:-$(dirname $0)} venv_path=${venv_path:-${tools_path}} venv_dir=${venv_name:-/../.venv} TOOLS=${tools_path} VENV=${venv:-${venv_path}/${venv_dir}} source $VENV/bin/activate && "$@" vmware-nsx-12.0.1/tools/install_venv.py0000666000175100017510000000465413244523345020164 0ustar zuulzuul00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Installation script for Neutron's development virtualenv """ from __future__ import print_function import os import sys import install_venv_common as install_venv def print_help(): help = """ Neutron development environment setup is complete. Neutron development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the Neutron virtualenv for the extent of your current shell session you can run: $ . .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ tools/with_venv.sh Also, make test will automatically use the virtualenv. 
""" print(help) def main(argv): if 'tools_path' in os.environ: root = os.environ['tools_path'] else: root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) if 'venv' in os.environ: venv = os.environ['venv'] else: venv = os.path.join(root, '.venv') pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'Neutron' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_python_version() install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() print_help() if __name__ == '__main__': sys.exit(main(sys.argv)) vmware-nsx-12.0.1/tools/coding-checks.sh0000777000175100017510000000243613244523345020142 0ustar zuulzuul00000000000000#!/bin/sh set -eu usage () { echo "Usage: $0 [OPTION]..." echo "Run vmware-nsx's coding check(s)" echo "" echo " -Y, --pylint [] Run pylint check on the entire vmware-nsx module or just files changed in basecommit (e.g. HEAD~1)" echo " -h, --help Print this usage message" echo exit 0 } process_options () { i=1 while [ $i -le $# ]; do eval opt=\$$i case $opt in -h|--help) usage;; -Y|--pylint) pylint=1;; *) scriptargs="$scriptargs $opt" esac i=$((i+1)) done } run_pylint () { local target="${scriptargs:-all}" if [ "$target" = "all" ]; then files="vmware_nsx" else case "$target" in *HEAD~[0-9]*) files=$(git diff --diff-filter=AM --name-only $target -- "*.py");; *) echo "$target is an unrecognized basecommit"; exit 1;; esac fi echo "Running pylint..." echo "You can speed this up by running it on 'HEAD~[0-9]' (e.g. HEAD~1, this change only)..." if [ -n "${files}" ]; then pylint --rcfile=.pylintrc --output-format=colorized ${files} else echo "No python changes in this commit, pylint check not required." 
exit 0 fi } scriptargs= pylint=1 process_options $@ if [ $pylint -eq 1 ]; then run_pylint exit 0 fi vmware-nsx-12.0.1/tools/__init__.py0000666000175100017510000000000013244523345017174 0ustar zuulzuul00000000000000vmware-nsx-12.0.1/tools/install_venv_common.py0000666000175100017510000001350713244523345021531 0ustar zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides methods needed by installation script for OpenStack development virtual environments. Since this script is used to bootstrap a virtualenv from the system's Python environment, it should be kept strictly compatible with Python 2.6. Synced in from openstack-common """ from __future__ import print_function import optparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, requirements, test_requirements, py_version, project): self.root = root self.venv = venv self.requirements = requirements self.test_requirements = test_requirements self.py_version = py_version self.project = project def die(self, message, *args): print(message % args, file=sys.stderr) sys.exit(1) def check_python_version(self): if sys.version_info < (2, 6): self.die("Need Python Version >= 2.6") def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. 
""" if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. 
""" if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install.") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. 
Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv() vmware-nsx-12.0.1/tools/tox_install.sh0000777000175100017510000000132013244523345017770 0ustar zuulzuul00000000000000#! /bin/sh set -e DIR=$(dirname $0) ${DIR}/tox_install_project.sh neutron neutron $* ${DIR}/tox_install_project.sh networking-l2gw networking_l2gw $* ${DIR}/tox_install_project.sh networking-sfc networking_sfc $* ${DIR}/tox_install_project.sh neutron-lbaas neutron_lbaas $* ${DIR}/tox_install_project.sh vmware-nsxlib vmware_nsxlib $* ${DIR}/tox_install_project.sh neutron-fwaas neutron_fwaas $* ${DIR}/tox_install_project.sh neutron-dynamic-routing neutron-dynamic-routing $* ${DIR}/tox_install_project.sh neutron-vpnaas neutron-vpnaas $* CONSTRAINTS_FILE=$1 shift install_cmd="pip install" if [ $CONSTRAINTS_FILE != "unconstrained" ]; then install_cmd="$install_cmd -c$CONSTRAINTS_FILE" fi $install_cmd -U $* vmware-nsx-12.0.1/tools/clean.sh0000777000175100017510000000027413244523345016521 0ustar zuulzuul00000000000000#!/bin/bash rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes rm -rf */*.deb rm -rf ./plugins/**/build/ ./plugins/**/dist rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-* vmware-nsx-12.0.1/tools/test-setup.sh0000777000175100017510000000350313244523345017552 0ustar zuulzuul00000000000000#!/bin/bash -xe # This script will be run by OpenStack CI before unit tests are run, # it sets up the test system as needed. # Developers should setup their test systems in a similar way. # This setup needs to be run as a user that can run sudo. # The root password for the MySQL database; pass it in via # MYSQL_ROOT_PW. 
DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} # This user and its password are used by the tests, if you change it, # your tests might fail. DB_USER=openstack_citest DB_PW=openstack_citest sudo -H mysqladmin -u root password $DB_ROOT_PW # It's best practice to remove anonymous users from the database. If # a anonymous user exists, then it matches first for connections and # other connections from that host will not work. sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " DELETE FROM mysql.user WHERE User=''; FLUSH PRIVILEGES; GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;" # Now create our database. mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " SET default_storage_engine=MYISAM; DROP DATABASE IF EXISTS openstack_citest; CREATE DATABASE openstack_citest CHARACTER SET utf8;" # Same for PostgreSQL # Setup user root_roles=$(sudo -H -u postgres psql -t -c " SELECT 'HERE' from pg_roles where rolname='$DB_USER'") if [[ ${root_roles} == *HERE ]];then sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" else sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" fi # Store password for tests cat << EOF > $HOME/.pgpass *:*:*:$DB_USER:$DB_PW EOF chmod 0600 $HOME/.pgpass # Now create our database psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest vmware-nsx-12.0.1/api-ref/0000775000175100017510000000000013244524600015251 5ustar zuulzuul00000000000000vmware-nsx-12.0.1/api-ref/rest.md0000666000175100017510000006264113244523345016570 0ustar zuulzuul00000000000000# VMware NSX OpenStack Neutron REST API Extensions ## General Information This document describes the REST API extensions integrated with the VMware [NSX OpenStack neutron plugins](https://wiki.openstack.org/wiki/Neutron/VMware_NSX_plugins). 
The intent of this document is to supplement the [OpenStack neutron REST API guide](https://developer.openstack.org/api-ref/networking/v2) by describing the extensions implemented by the VMware NSX neutron plugins. The VMware NSX neutron plugins implement [Neutron API extensions](https://wiki.openstack.org/wiki/NeutronDevelopment#API_Extensions) by defining new top-level REST resources, operations (e.g. verbs) and attribute extensions to existing neutron REST API entities (depending on the extension). As all extensions apply to the neutron REST API, the [general information](https://developer.openstack.org/api-ref/networking/v2/#general-information) for the neutron API applies here as well. The VMware NSX neutron extensions supported by your plugin will depend on the version of VMware NSX used. Two versions described herein are: * [NSX for vSphere](https://www.vmware.com/support/pubs/nsx_pubs.html) aka 'NSX-v'. * [NSX Transformers](https://my.vmware.com/web/vmware/details?productId=580&downloadGroup=NSX-v3-101) aka 'NSX-v3'. ## API Reference * [Advanced Service Providers](#advanced-service-providers) * [DHCP MTU](#dhcp-mtu) * [DNS Search Domain](#dns-search-domain) * [MAC Learning](#mac-learning) * [Provider Networks](#provider-networks) * [Provider Security Groups](#provider-security-groups) * [Router Size](#router-size) * [Router Type](#router-type) * [Security Group Rule IP Prefix](#security-group-rule-ip-prefix) * [Security Group Logging](#security-group-logging) * [VNIC Index](#vnic-index) ### [Advanced Service Providers](#advanced-service-providers) ###### Description This resource attribute extensions adds the `advanced_service_providers` attribute to neutron [subnets](https://developer.openstack.org/api-ref/networking/v2/#subnets). This read-only attribute is a list of NSX advanced service provider IDs associated on a per-subnet basis. The advanced service provider IDs are populated by the plugin automatically when interfacing with the NSX manager backend. 
###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-v. ###### Supported Verbs None (read-only). ###### Extended Resource [subnet](https://developer.openstack.org/api-ref/networking/v2/#subnets) ###### Extension Attribute(s) * `advanced_service_providers`: A list of NSX advanced service provider IDs (in `string` format) associated with the subnet. ###### Example Response ```json { "subnet":{ "description":"", "enable_dhcp":true, "network_id":"7ea9964a-45b0-45eb-8b67-da47ce53cf5f", "tenant_id":"64b39295ba3942ca8be4a8a25d9b5157", "created_at":"2016-08-28T13:49:32", "dns_nameservers":[ ], "updated_at":"2016-08-28T13:49:32", "gateway_ip":"10.0.0.1", "ipv6_ra_mode":null, "allocation_pools":[ { "start":"10.0.0.2", "end":"10.0.0.254" } ], "host_routes":[ ], "advanced_service_providers":[ "edge-1", "edge-2" ], "ip_version":4, "ipv6_address_mode":null, "cidr":"10.0.0.0/24", "id":"f1153a28-8f36-4547-a024-3eb08e4e44b1", "subnetpool_id":null, "name":"private-subnet" } } ``` ### [DHCP MTU](#dhcp-mtu) ###### Description Extends neutron [subnets](https://developer.openstack.org/api-ref/networking/v2/#subnets) providing the ability to specify per-subnet DHCP MTU via the `dhcp_mtu` attribute. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-v. ###### Supported Verbs POST, PUT ###### Extended Resource [subnet](https://developer.openstack.org/api-ref/networking/v2/#subnets) ###### Extension Attribute(s) * `dhcp_mtu`: The DHCP MTU to use for the associated subnet. Must be a valid DHCP MTU value between 68 and 65535. 
###### Example Response ```json { "subnet":{ "description":"", "enable_dhcp":true, "network_id":"91abf611-44a8-4c5e-bf19-92f91ee34d6d", "tenant_id":"16f24183154f4e51bebe3f10e810e19a", "created_at":"2016-09-16T16:28:34", "dhcp_mtu": 8048, "dns_nameservers":[ ], "updated_at":"2016-09-16T16:28:34", "gateway_ip":"192.168.1.1", "ipv6_ra_mode":null, "allocation_pools":[ { "start":"192.168.1.9", "end":"192.168.1.99" } ], "host_routes":[ ], "revision_number":2, "ip_version":4, "ipv6_address_mode":null, "cidr":"192.168.1.0/24", "project_id":"16f24183154f4e51bebe3f10e810e19a", "id":"8300a4ff-09db-4f64-955b-7e215044c9c3", "subnetpool_id":null, "name":"snet1" } } ``` ### [DNS Search Domain](#dns-search-domain) ###### Description Extends neutron [subnets](https://developer.openstack.org/api-ref/networking/v2/#subnets) providing the ability to specify per-subnet DNS search via the `dns_search_domain` attribute. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-v. ###### Supported Verbs POST, PUT ###### Extended Resource [subnet](https://developer.openstack.org/api-ref/networking/v2/#subnets) ###### Extension Attribute(s) * `dns_search_domain`: The DNS search domain to use for networking on the associated subnet. The value must be a valid DNS search domain. 
###### Example Response ```json { "subnet":{ "description":"", "enable_dhcp":true, "network_id":"91abf611-44a8-4c5e-bf19-92f91ee34d6d", "tenant_id":"16f24183154f4e51bebe3f10e810e19a", "created_at":"2016-09-16T16:28:34", "dns_search_domain": "example.com", "dns_nameservers":[ ], "updated_at":"2016-09-16T16:28:34", "gateway_ip":"192.168.1.1", "ipv6_ra_mode":null, "allocation_pools":[ { "start":"192.168.1.9", "end":"192.168.1.99" } ], "host_routes":[ ], "revision_number":2, "ip_version":4, "ipv6_address_mode":null, "cidr":"192.168.1.0/24", "project_id":"16f24183154f4e51bebe3f10e810e19a", "id":"8300a4ff-09db-4f64-955b-7e215044c9c3", "subnetpool_id":null, "name":"snet1" } } ``` ### [MAC Learning](#mac-learning) ###### Description Extends neutron [ports](https://developer.openstack.org/api-ref/networking/v2/#ports) providing the ability to enable MAC learning on the associated port via the `mac_learning_enabled` attribute. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-v3. ###### Supported Verbs POST, PUT ###### Extended Resource [ports](https://developer.openstack.org/api-ref/networking/v2/#ports) ###### Extension Attribute(s) * `mac_learning_enabled`: A boolean value that indicates if MAC Learning is enabled on the associated port. 
###### Example Response ```json { "port":{ "allowed_address_pairs":[ ], "extra_dhcp_opts":[ ], "updated_at":"2016-09-16T16:28:35", "device_owner":"network:dhcp", "revision_number":3, "port_security_enabled":false, "mac_learning_enabled":true, "fixed_ips":[ { "subnet_id":"8300a4ff-09db-4f64-955b-7e215044c9c3", "ip_address":"192.168.1.9" } ], "id":"0093f4cc-f936-448a-9a25-ae57f66a6d57", "security_groups":[ ], "binding:vif_details":{ "port_filter":true, "nsx-logical-switch-id":"785f0bb4-3341-4e8c-abc4-cd3068f333f2" }, "binding:vif_type":"ovs", "mac_address":"fa:16:3e:2d:19:96", "project_id":"16f24183154f4e51bebe3f10e810e19a", "status":"ACTIVE", "binding:host_id":"l2b", "description":"", "device_id":"dhcp559b5e8d-0b9d-5e4c-a8ff-819ade66d01d-91abf611-44a8-4c5e-bf19-92f91ee34d6d", "name":"", "admin_state_up":true, "network_id":"91abf611-44a8-4c5e-bf19-92f91ee34d6d", "tenant_id":"16f24183154f4e51bebe3f10e810e19a", "created_at":"2016-09-16T16:28:35", "provider_security_groups":[ ], "binding:vnic_type":"normal" } } ``` ### [Provider Networks](#provider-networks) ###### Description The VMware NSX neutron plugins also support the [neutron provider networks extension](https://docs.openstack.org/neutron/latest/admin/archives/adv-features.html#provider-networks). Provider network extensions add [attributes](https://docs.openstack.org/neutron/latest/admin/archives/adv-features.html#provider-attributes) to neutron [networks](https://developer.openstack.org/api-ref/networking/v2/#networks) enabling providers to map virtual networks onto physical networks, or in this case onto physical networks in NSX. ###### Extension Type Resource attribute extensions. ###### Supported NSX Versions NSX-v3, NSX-v. ###### Supported Verbs See the [neutron provider networks extension](https://developer.openstack.org/api-ref/networking/v2/#networks-provider-extended-attributes-networks) API reference documentation. 
###### Extended Resource * [networks](https://developer.openstack.org/api-ref/networking/v2/#networks) ###### Extension Attribute(s) * `provider:network_type`: For the NSX plugins valid values are `flat` or `vlan`. * `provider:physical_network`: For the NSX plugins, this value should be the UUID of the NSX transport zone to bridge the network on. * `provider:segmentation_id`: For the NSX plugins, this value should be set to the VLAN identifier of the physical network, or unset of the network type is `flat`. ###### Example Response ```json { "network": { "status": "ACTIVE", "subnets": [ "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" ], "name": "private-network", "router:external": false, "admin_state_up": true, "tenant_id": "4fd44f30292945e481c7b8a0c8908869", "created_at": "2016-03-08T20:19:41", "mtu": 0, "shared": true, "port_security_enabled": true, "provider:network_type": "vlan", "provider:physical_network": "00cff66d-5fa8-4fda-bd7d-87e372fe86c7", "provider:segmentation_id": 101, "updated_at": "2016-03-08T20:19:41", "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" } } ``` ### [Provider Security Groups](#provider-security-groups) ###### Description This extension enables support for provider-only created/managed neutron [security groups](https://developer.openstack.org/api-ref/networking/v2/#security-groups-security-groups). To enable this support a `provider` boolean attribute is added to neutron security groups indicating if the group is a provider-only group. Additionally, neutron [ports](https://developer.openstack.org/api-ref/networking/v2/#ports) are extended with a `provider_security_groups` attribute that indicates a list of provider-only security groups belonging to the said port. ###### Extension Type Resource attribute extensions. ###### Supported NSX Versions NSX-v3, NSX-v. ###### Supported Verbs The `provider` attribute on neutron security groups is only settable during creation (POST). 
However the `provider_security_groups` attribute on ports supports both POST and PUT. ###### Extended Resource * [ports](https://developer.openstack.org/api-ref/networking/v2/#ports) * [security groups](https://developer.openstack.org/api-ref/networking/v2/#security-groups-security-groups) ###### Extension Attribute(s) * `provider`: A boolean indicating if the security group is provider-only. * `provider_security_groups`: A list of provider-only security group UUIDs associated with a said port. ###### Example Response GET security-group ```json { "security_group":{ "logging":false, "description":"My security group", "tenant_id":"1efff4cd762944a6bbdb6d3bba0468ef", "created_at":"2016-09-16T16:34:55", "updated_at":"2016-09-16T16:34:55", "provider":true, "security_group_rules":[ { "local_ip_prefix":null, "direction":"ingress", "protocol":null, "description":null, "port_range_max":null, "updated_at":"2016-09-16T16:34:55", "revision_number":1, "id":"98acaf6e-0b9d-45d6-b4ec-d9dd0df3a52b", "remote_group_id":"3a729518-0214-44d6-9f25-704db70710a5", "remote_ip_prefix":null, "created_at":"2016-09-16T16:34:55", "security_group_id":"3a729518-0214-44d6-9f25-704db70710a5", "tenant_id":"1efff4cd762944a6bbdb6d3bba0468ef", "port_range_min":null, "ethertype":"IPv6", "project_id":"1efff4cd762944a6bbdb6d3bba0468ef" }, { "local_ip_prefix":null, "direction":"egress", "protocol":null, "description":null, "port_range_max":null, "updated_at":"2016-09-16T16:34:55", "revision_number":1, "id":"9fba2f50-9eef-48c0-8b45-c2fae98e7294", "remote_group_id":null, "remote_ip_prefix":null, "created_at":"2016-09-16T16:34:55", "security_group_id":"3a729518-0214-44d6-9f25-704db70710a5", "tenant_id":"1efff4cd762944a6bbdb6d3bba0468ef", "port_range_min":null, "ethertype":"IPv4", "project_id":"1efff4cd762944a6bbdb6d3bba0468ef" }, { "local_ip_prefix":null, "direction":"egress", "protocol":null, "description":null, "port_range_max":null, "updated_at":"2016-09-16T16:34:55", "revision_number":1, 
"id":"c2eecacb-5328-4081-8fe7-701777fbb2a1", "remote_group_id":null, "remote_ip_prefix":null, "created_at":"2016-09-16T16:34:55", "security_group_id":"3a729518-0214-44d6-9f25-704db70710a5", "tenant_id":"1efff4cd762944a6bbdb6d3bba0468ef", "port_range_min":null, "ethertype":"IPv6", "project_id":"1efff4cd762944a6bbdb6d3bba0468ef" }, { "local_ip_prefix":null, "direction":"ingress", "protocol":null, "description":null, "port_range_max":null, "updated_at":"2016-09-16T16:34:55", "revision_number":1, "id":"e073a066-bc14-41e7-939b-84ec4af0606f", "remote_group_id":"3a729518-0214-44d6-9f25-704db70710a5", "remote_ip_prefix":null, "created_at":"2016-09-16T16:34:55", "security_group_id":"3a729518-0214-44d6-9f25-704db70710a5", "tenant_id":"1efff4cd762944a6bbdb6d3bba0468ef", "port_range_min":null, "ethertype":"IPv4", "project_id":"1efff4cd762944a6bbdb6d3bba0468ef" } ], "revision_number":1, "provider":false, "project_id":"1efff4cd762944a6bbdb6d3bba0468ef", "id":"3a729518-0214-44d6-9f25-704db70710a5", "name":"my provider group" } } ``` GET port ```json { "port":{ "allowed_address_pairs":[ ], "extra_dhcp_opts":[ ], "updated_at":"2016-09-16T16:28:35", "device_owner":"network:dhcp", "revision_number":3, "port_security_enabled":false, "provider_security_groups":["910da4ff-09db-4f64-955b-7e215044ca56"], "fixed_ips":[ { "subnet_id":"8300a4ff-09db-4f64-955b-7e215044c9c3", "ip_address":"192.168.1.9" } ], "id":"0093f4cc-f936-448a-9a25-ae57f66a6d57", "security_groups":[ ], "binding:vif_details":{ "port_filter":true, "nsx-logical-switch-id":"785f0bb4-3341-4e8c-abc4-cd3068f333f2" }, "binding:vif_type":"ovs", "mac_address":"fa:16:3e:2d:19:96", "project_id":"16f24183154f4e51bebe3f10e810e19a", "status":"ACTIVE", "binding:host_id":"l2b", "description":"", "device_id":"dhcp559b5e8d-0b9d-5e4c-a8ff-819ade66d01d-91abf611-44a8-4c5e-bf19-92f91ee34d6d", "name":"", "admin_state_up":true, "network_id":"91abf611-44a8-4c5e-bf19-92f91ee34d6d", "tenant_id":"16f24183154f4e51bebe3f10e810e19a", 
"created_at":"2016-09-16T16:28:35", "binding:vnic_type":"normal" } } ``` ### [Router Size](#router-size) ###### Description Extends neutron [routers](https://developer.openstack.org/api-ref/networking/v2/#routers-routers) by adding the `router_size` attribute to support configuration of NSX-v edge size. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-v. ###### Supported Verbs POST, PUT ###### Extended Resource [routers](https://developer.openstack.org/api-ref/networking/v2/#routers-routers) ###### Extension Attribute(s) * `router_size`: The NSX-v edge size to use. ###### Example Response ```json { "router":{ "admin_state_up":true, "availability_zone_hints":[ ], "availability_zones":[ "nova" ], "description":"", "router_size":"xlarge", "distributed":false, "external_gateway_info":{ "enable_snat":true, "external_fixed_ips":[ { "ip_address":"172.24.4.6", "subnet_id":"b930d7f6-ceb7-40a0-8b81-a425dd994ccf" }, { "ip_address":"2001:db8::9", "subnet_id":"0c56df5d-ace5-46c8-8f4c-45fa4e334d18" } ], "network_id":"ae34051f-aa6c-4c75-abf5-50dc9ac99ef3" }, "ha":false, "id":"f8a44de0-fc8e-45df-93c7-f79bf3b01c95", "name":"router1", "routes":[ ], "status":"ACTIVE", "tenant_id":"0bd18306d801447bb457a46252d82d13" } } ``` ### [Router Type](#router-type) ###### Description Extends neutron [routers](https://developer.openstack.org/api-ref/networking/v2/#routers-routers) by adding the `router_type` attribute to support configuration of NSX-v router type. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-v. ###### Supported Verbs POST, PUT ###### Extended Resource [routers](https://developer.openstack.org/api-ref/networking/v2/#routers-routers) ###### Extension Attribute(s) * `router_type`: The NSX-v router type. Must be either `shared` or `exclusive`. 
###### Example Response ```json { "router":{ "admin_state_up":true, "availability_zone_hints":[ ], "availability_zones":[ "nova" ], "description":"", "router_type":"exclusive", "distributed":false, "external_gateway_info":{ "enable_snat":true, "external_fixed_ips":[ { "ip_address":"172.24.4.6", "subnet_id":"b930d7f6-ceb7-40a0-8b81-a425dd994ccf" }, { "ip_address":"2001:db8::9", "subnet_id":"0c56df5d-ace5-46c8-8f4c-45fa4e334d18" } ], "network_id":"ae34051f-aa6c-4c75-abf5-50dc9ac99ef3" }, "ha":false, "id":"f8a44de0-fc8e-45df-93c7-f79bf3b01c95", "name":"router1", "routes":[ ], "status":"ACTIVE", "tenant_id":"0bd18306d801447bb457a46252d82d13" } } ``` ### [Security Group Rule IP Prefix](#security-group-rule-ip-prefix) ###### Description Extends neutron [security group rules](https://developer.openstack.org/api-ref/networking/v2/#security-group-rules-security-group-rules) by adding a `local_ip_prefix` attribute allowing rules to be created with IP prefixes. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-v3, NSX-v. ###### Supported Verbs POST; using an IP prefix on a rule can only be done when creating the rule. ###### Extended Resource [security group rules](https://developer.openstack.org/api-ref/networking/v2/#security-group-rules-security-group-rules) ###### Extension Attribute(s) * `local_ip_prefix`: The local IP prefix used for the rule. ###### Example Response ```json { "security_group_rule":{ "direction":"ingress", "port_range_min":"80", "ethertype":"IPv4", "port_range_max":"80", "protocol":"tcp", "local_ip_prefix":"239.240.1.0/16", "remote_ip_prefix":"192.168.1.0/24", "security_group_id":"a7734e61-b545-452d-a3cd-0189cbd9747a" } } ``` ### [Security Group Logging](#security-group-logging) ###### Description Extends neutron [security groups](https://developer.openstack.org/api-ref/networking/v2/#security-groups-security-groups) with a boolean attribute `logging` to enable per security group logging on NSX. 
###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-v3, NSX-v. ###### Supported Verbs POST, PUT. ###### Extended Resource [security groups](https://developer.openstack.org/api-ref/networking/v2/#security-groups-security-groups) ###### Extension Attribute(s) * `logging`: A boolean attribute indicating if logging is enabled for the group. ###### Example Response ```json { "security_group":{ "description":"logged secgroup", "id":"85cc3048-abc3-43cc-89b3-377341426ac5", "name":"logged secgroup", "logging":true, "security_group_rules":[ { "direction":"egress", "ethertype":"IPv6", "id":"3c0e45ff-adaf-4124-b083-bf390e5482ff", "port_range_max":null, "port_range_min":null, "protocol":null, "remote_group_id":null, "remote_ip_prefix":null, "security_group_id":"85cc3048-abc3-43cc-89b3-377341426ac5", "tenant_id":"e4f50856753b4dc6afee5fa6b9b6c550" }, { "direction":"egress", "ethertype":"IPv4", "id":"93aa42e5-80db-4581-9391-3a608bd0e448", "port_range_max":null, "port_range_min":null, "protocol":null, "remote_group_id":null, "remote_ip_prefix":null, "security_group_id":"85cc3048-abc3-43cc-89b3-377341426ac5", "tenant_id":"e4f50856753b4dc6afee5fa6b9b6c550" }, { "direction":"ingress", "ethertype":"IPv6", "id":"c0b09f00-1d49-4e64-a0a7-8a186d928138", "port_range_max":null, "port_range_min":null, "protocol":null, "remote_group_id":"85cc3048-abc3-43cc-89b3-377341426ac5", "remote_ip_prefix":null, "security_group_id":"85cc3048-abc3-43cc-89b3-377341426ac5", "tenant_id":"e4f50856753b4dc6afee5fa6b9b6c550" }, { "direction":"ingress", "ethertype":"IPv4", "id":"f7d45c89-008e-4bab-88ad-d6811724c51c", "port_range_max":null, "port_range_min":null, "protocol":null, "remote_group_id":"85cc3048-abc3-43cc-89b3-377341426ac5", "remote_ip_prefix":null, "security_group_id":"85cc3048-abc3-43cc-89b3-377341426ac5", "tenant_id":"e4f50856753b4dc6afee5fa6b9b6c550" } ], "tenant_id":"e4f50856753b4dc6afee5fa6b9b6c550" } } ``` ### [VNIC Index](#vnic-index) ###### Description Extends 
neutron [ports](https://developer.openstack.org/api-ref/networking/v2/#ports) by adding the `vnic_index` attribute enabling per-port assignment of a VNIC index. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-v. ###### Supported Verbs POST, PUT. ###### Extended Resource [ports](https://developer.openstack.org/api-ref/networking/v2/#ports) ###### Extension Attribute(s) * `vnic_index`: The VNIC index (integer value) assigned to the port. ###### Example Response ```json { "port":{ "status":"ACTIVE", "vnic_index":3, "name":"", "allowed_address_pairs":[ ], "admin_state_up":true, "network_id":"a87cc70a-3e15-4acf-8205-9b711a3531b7", "tenant_id":"7e02058126cc4950b75f9970368ba177", "created_at":"2016-03-08T20:19:41", "extra_dhcp_opts":[ ], "device_owner":"network:router_interface", "mac_address":"fa:16:3e:23:fd:d7", "fixed_ips":[ { "subnet_id":"a0304c3a-4f08-4c43-88af-d796509c97d2", "ip_address":"10.0.0.1" } ], "id":"46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2", "updated_at":"2016-03-08T20:19:41", "security_groups":[ ], "device_id":"5e3898d7-11be-483e-9732-b2f5eccd2b2e" } } ``` vmware-nsx-12.0.1/.mailmap0000666000175100017510000000111613244523345015355 0ustar zuulzuul00000000000000# Format is: # # lawrancejing Jiajun Liu Zhongyue Luo Kun Huang Zhenguo Niu Isaku Yamahata Isaku Yamahata Morgan Fainberg vmware-nsx-12.0.1/tox.ini0000666000175100017510000000722513244523413015252 0ustar zuulzuul00000000000000[tox] envlist = py35,py27,pep8,docs minversion = 1.6 skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONWARNINGS=default::DeprecationWarning passenv = TRACE_FAILONLY GENERATE_HASHES http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY usedevelop = True install_command = {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?stable/queens} {opts} {packages} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt 
whitelist_externals = sh commands = {toxinidir}/tools/ostestr_compat_shim.sh {posargs} # there is also secret magic in ostestr which lets you run in a fail only # mode. To do this define the TRACE_FAILONLY environmental variable. [testenv:common] # Fake job to define environment variables shared between dsvm/non-dsvm jobs setenv = OS_TEST_TIMEOUT=180 commands = false [testenv:functional] basepython = python2.7 setenv = {[testenv]setenv} {[testenv:common]setenv} OS_TEST_PATH=./vmware_nsx/tests/functional OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs} deps = {[testenv]deps} -r{toxinidir}/vmware_nsx/tests/functional/requirements.txt [testenv:dsvm-functional] basepython = python2.7 setenv = OS_SUDO_TESTING=1 OS_FAIL_ON_MISSING_DEPS=1 OS_TEST_TIMEOUT=180 sitepackages=True deps = {[testenv:functional]deps} commands = {toxinidir}/tools/ostestr_compat_shim.sh {posargs} [tox:jenkins] sitepackages = True [testenv:releasenotes] deps = -r{toxinidir}/doc/requirements.txt commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:py27] setenv = OS_FAIL_ON_MISSING_DEPS=1 [testenv:pep8] basepython = python2.7 deps = {[testenv]deps} commands = # If it is easier to add a check via a shell script, consider adding it in this file sh ./tools/misc-sanity-checks.sh # Checks for coding and style guidelines flake8 sh ./tools/coding-checks.sh --pylint '{posargs}' neutron-db-manage --subproject vmware-nsx check_migration {[testenv:genconfig]commands} whitelist_externals = sh bash [testenv:bandit] deps = -r{toxinidir}/test-requirements.txt commands = bandit -r vmware_nsx -n 5 -ll [testenv:cover] basepython = python2.7 commands = python setup.py testr --coverage --testr-args='{posargs}' coverage report [testenv:venv] commands = {posargs} [testenv:docs] deps = -r{toxinidir}/doc/requirements.txt commands = sphinx-build -b html doc/source doc/build/html [flake8] # E125 continuation line does not distinguish itself from next logical 
line # E126 continuation line over-indented for hanging indent # E128 continuation line under-indented for visual indent # E129 visually indented line with same indent as next logical line # E265 block comment should start with ‘# ‘ # H305 imports not grouped correctly # H307 like imports should be grouped together # H404 multi line docstring should start with a summary # H405 multi line docstring summary not separated with an empty line # H904 Wrap long lines in parentheses instead of a backslash # TODO(dougwig) -- uncomment this to test for remaining linkages # N530 direct neutron imports not allowed # N531 translations hints ignore = E125,E126,E128,E129,E265,H305,H307,H404,H405,H904,N530,N531 show-source = true builtins = _ exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,.ropeproject import-order-style = pep8 [hacking] import_exceptions = vmware_nsx._i18n, local-check-factory = neutron_lib.hacking.checks.factory [testenv:genconfig] commands = {toxinidir}/tools/generate_config_file_samples.sh [testenv:uuidgen] commands = check-uuid --fix